| code (string, 86–54.5k chars) | code_codestyle (int64, 0–371) | style_context (string, 87–49.2k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity coefficient of two collections.

    Works on sets as well as ordered collections (lists/tuples). With
    ``alternative_union=True`` the denominator is ``len(set_a) + len(set_b)``
    instead of the size of the true union.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
    set_a = {'a', 'b', 'c', 'd', 'e'}
    set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
    print(jaccard_similarity(set_a, set_b)) | 297 |
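Editor's note: a minimal hedged sanity check of the function above, with the expected values worked out by hand (intersection size 3; true union size 8; alternative union 5 + 6 = 11). Assumes the definitions above are in scope.

# Sketch: checking jaccard_similarity on sets and on lists.
assert jaccard_similarity(set_a, set_b) == 3 / 8
assert jaccard_similarity(set_a, set_b, alternative_union=True) == 3 / 11
assert jaccard_similarity(sorted(set_a), sorted(set_b)) == 3 / 8  # exercises the list branch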
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2')

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'), up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='projection', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear', beta_start=0.00085, beta_end=0.012, prediction_type='v_prediction', set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            'feature_extractor': feature_extractor,
            'image_encoder': image_encoder.eval(),
            # image noising components
            'image_normalizer': image_normalizer.eval(),
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder.eval(),
            'unet': unet.eval(),
            'scheduler': scheduler,
            'vae': vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({'image_embeds': None})
        image = sd_pipe(**inputs).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(),
        reason='XFormers attention is only available with CUDA and `xformers` installed',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy')

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-l-img2img', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe(input_image, 'anime turtle', generator=generator, output_type='np')

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy')

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe(input_image, 'anime turtle', generator=generator, output_type='np')

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png')

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img', torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image, 'anime turtle', num_inference_steps=2, output_type='np',
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9 | 297 | 1 |
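Editor's note: for orientation, a hedged sketch of minimal end-user usage of the pipeline exercised above. The checkpoint name and prompt are taken from the integration test itself; everything else is a plausible minimal setup, not verified here.

# Sketch: minimal StableUnCLIPImg2ImgPipeline usage mirroring the slow test.
import torch
from diffusers import StableUnCLIPImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
    "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")
pipe.enable_attention_slicing()  # memory saving, as in the test

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
)
image = pipe(init_image, "anime turtle").images[0]
image.save("variation.png")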
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy")
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 350 |
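Editor's note: a hedged sketch of minimal end-user usage of the Shap-E image-to-image pipeline tested above; checkpoint, call parameters, and expected shape mirror the integration test, but this is a sketch, not a verified script.

# Sketch: minimal ShapEImg2ImgPipeline usage mirroring the slow test.
import torch
from diffusers import ShapEImg2ImgPipeline
from diffusers.utils import load_image

pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
pipe = pipe.to("cuda")

image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/corgi.png"
)
generator = torch.Generator(device="cuda").manual_seed(0)
frames = pipe(
    image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np"
).images[0]
print(frames.shape)  # (20, 64, 64, 3) per the integration test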
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None
    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))

    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))

    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))

    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np")
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length)
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length, return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3)
        self.assertTrue(
            abs(
                np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
            < 1e-3)
        self.assertTrue(
            abs(
                np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
            < 1e-3)
        self.assertTrue(
            abs(input_4[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3)
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3)
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np", truncation=True,
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np")
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True, return_tensors="np",
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np")
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), pad_to_multiple_of=pad_to_multiple_of, truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np")
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length])
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs])
| 212 | 0 |
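Editor's note: a small hedged sketch of the `pad_to_multiple_of` arithmetic the padding and truncation tests above rely on (the `expected_mult_pad_length` / `expected_length` computations). Pure Python, no transformers install needed; the helper name is ours.

# Sketch: rounding a sequence length up to the next multiple, as the tests do.
def round_up_to_multiple(length: int, multiple: int) -> int:
    if length % multiple == 0:
        return length
    return (length // multiple + 1) * multiple

assert round_up_to_multiple(20, 10) == 20
assert round_up_to_multiple(23, 10) == 30
assert round_up_to_multiple(25, 12) == 36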
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
lowercase__ : Union[str, Any] = ""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
class SCREAMING_SNAKE_CASE__ ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ = " " )-> str:
'''simple docstring'''
__UpperCamelCase = sentence_delimiter
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> List[Any]:
'''simple docstring'''
return list(SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> int:
'''simple docstring'''
__UpperCamelCase = []
for sent_idx, sentence in enumerate(SCREAMING_SNAKE_CASE_ ):
chars.extend(self.process_string(SCREAMING_SNAKE_CASE_ ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(SCREAMING_SNAKE_CASE_ ) - 1:
chars.append(self.sentence_delimiter )
return chars
lowercase__ : Optional[int] = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
lowercase__ : Optional[Any] = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
lowercase__ : Optional[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
lowercase__ : Optional[int] = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
lowercase__ : str = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references, predictions, truth_transform=cer_transform, hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference, prediction, truth_transform=cer_transform, hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
| 328 |
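Editor's note: a hedged sketch of invoking the metric above, mirroring the example already embedded in its `_KWARGS_DESCRIPTION` docstring (assumes an older `datasets` release where `load_metric` is still available).

# Sketch: computing CER exactly as in the metric's own docstring example.
import datasets

predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]
cer = datasets.load_metric("cer")
print(cer.compute(predictions=predictions, references=references))  # ~0.3415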
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt().

    >>> perfect_square(9)
    True
    >>> perfect_square(10)
    False
    """
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search.

    >>> perfect_square_binary_search(16)
    True
    >>> perfect_square_binary_search(10)
    False
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 328 | 1 |
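Editor's note: a hedged aside on why the binary-search variant above exists. The `math.sqrt`-based check converts to floating point, so for integers beyond 2**53 it can disagree with the exact integer arithmetic of the binary search; the sketch below may print different results depending on float rounding.

# Sketch: float precision can make the sqrt-based check unreliable for
# very large perfect squares, while the binary search stays exact.
big = (10**8 + 7) ** 2
print(perfect_square(big))                # may be False due to float rounding
print(perfect_square_binary_search(big))  # True: integer arithmetic only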
def combination_util(arr, n, r, index, data, i):
    """Recursively build and print all combinations of size r."""
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combinations one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
| 363 |
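Editor's note: the recursion above enumerates the same r-subsets that the standard library yields, in the same lexicographic order; a short sketch for comparison.

# Sketch: itertools.combinations produces the same output order as
# the include-first recursion in print_combination above.
from itertools import combinations

arr = [10, 20, 30, 40, 50]
for combo in combinations(arr, 3):
    print(*combo)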
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
| 218 | 0 |
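Editor's note: a hedged sketch of instantiating the config above with one default overridden; default values shown are read straight from the `__init__` signature.

# Sketch: constructing a VivitConfig and overriding num_frames.
config = VivitConfig(num_frames=16)
print(config.model_type, config.num_frames, config.hidden_size)  # vivit 16 768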
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_CITATION = '\\n@inproceedings{banarjee2005,\n  title     = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n  author    = {Banerjee, Satanjeev  and Lavie, Alon},\n  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n  month     = jun,\n  year      = {2005},\n  address   = {Ann Arbor, Michigan},\n  publisher = {Association for Computational Linguistics},\n  url       = {https://www.aclweb.org/anthology/W05-0909},\n  pages     = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
def lowercase ( self : Any ) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('string', id='sequence' ),
'references': datasets.Value('string', id='sequence' ),
} ), codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'], reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
], )
def lowercase ( self : Tuple, lowerCAmelCase : Optional[Any] ) -> Tuple:
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def lowercase ( self : Dict, lowerCAmelCase : List[Any], lowerCAmelCase : Dict, lowerCAmelCase : Optional[Any]=0.9, lowerCAmelCase : List[Any]=3, lowerCAmelCase : List[str]=0.5 ) -> Any:
if NLTK_VERSION >= version.Version('3.6.5' ):
lowercase : Tuple = [
meteor_score.single_meteor_score(
word_tokenize(lowerCAmelCase ), word_tokenize(lowerCAmelCase ), alpha=lowerCAmelCase, beta=lowerCAmelCase, gamma=lowerCAmelCase )
for ref, pred in zip(lowerCAmelCase, lowerCAmelCase )
]
else:
lowercase : List[Any] = [
meteor_score.single_meteor_score(lowerCAmelCase, lowerCAmelCase, alpha=lowerCAmelCase, beta=lowerCAmelCase, gamma=lowerCAmelCase )
for ref, pred in zip(lowerCAmelCase, lowerCAmelCase )
]
return {"meteor": np.mean(lowerCAmelCase )}
 | 255 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        # Always use a fixed sequence length so candidates can be stacked into a batch.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files) | 135 | 0 |
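Editor's note: a hedged sketch of the `batch_encode_candidates` helper defined above, following the example in the transformers docstring for this method (checkpoint name from the URL maps above).

# Sketch: batch-encoding reader candidates with the fast Realm tokenizer.
from transformers import RealmTokenizerFast

tokenizer = RealmTokenizerFast.from_pretrained("google/realm-orqa-nq-openqa")
text = [["Hello world!", "Nice to meet you!"]]  # one example, two candidates
batch = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")
print(batch.input_ids.shape)  # (1, 2, 10): batch x candidates x seq_len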
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
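# A possible invocation, assuming the script above is saved as e.g. sd_ipex.py and
# model_id is pointed at real Stable Diffusion weights (the path above is a placeholder):
#
#   python sd_ipex.py --dpm --steps 20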
| 370 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3_018, 70_307, 91, 2]
        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCAmelCase__ = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        expected_encoding = lowerCAmelCase__  # the encoding dict defined just above
        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 122 | 0 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
    from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38_848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()
        batch = tok(
            ["I am a small frog" * 1_000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = __SCREAMING_SNAKE_CASE  # the encoding dict defined just above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")
        source_text = "Tämä on testi"
        target_text = "This is a test"
        expected_src_ids = [76, 7, 2_047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
| 9 |
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
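# A minimal shape-check sketch for the module above. All sizes here are illustrative
# assumptions, not values mandated by the class:
if __name__ == "__main__":
    proj = UnCLIPTextProjModel(
        clip_extra_context_tokens=4,
        clip_embeddings_dim=768,
        time_embed_dim=1536,
        cross_attention_dim=2048,
    )
    image_embeddings = torch.randn(2, 768)
    prompt_embeds = torch.randn(4, 768)  # batch doubles when classifier-free guidance is on
    text_encoder_hidden_states = torch.randn(4, 77, 768)
    hidden_states, time_embeddings = proj(
        image_embeddings=image_embeddings,
        prompt_embeds=prompt_embeds,
        text_encoder_hidden_states=text_encoder_hidden_states,
        do_classifier_free_guidance=True,
    )
    print(hidden_states.shape)  # torch.Size([4, 81, 2048]) -- 4 extra context tokens + 77 text tokens
    print(time_embeddings.shape)  # torch.Size([4, 1536])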
| 63 | 0 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})
                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))
    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
| 360 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
    from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
| 28 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
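# A small illustrative sketch of using the config (values are hypothetical, not
# from the original file):
if __name__ == "__main__":
    config = InformerConfig(prediction_length=24, context_length=48, num_time_features=2)
    print(config.model_type)  # "informer"
    print(config.hidden_size)  # 64 -- aliased to d_model through attribute_map
    print(config.lags_sequence)  # [1, 2, 3, 4, 5, 6, 7] by default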
| 104 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (= step / max_step) after which the complex number
    constituted by this x-y-pair diverges. Members of the Mandelbrot set do not
    diverge, so their distance is 1.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black-and-white color-coding: the Mandelbrot set is black, everything else white."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Color-coding that takes the relative distance into account; the Mandelbrot set is black."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
lowerCAmelCase__ = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 104 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
| 25 |
'''simple docstring'''
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of 0s, 1s and 2s in place, in a single pass."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence
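# Example behaviour (illustrative, not part of the original module):
#   dutch_national_flag_sort([2, 1, 0, 0, 1, 2])  ->  [0, 0, 1, 1, 2, 2]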
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : str = input("""Enter numbers separated by commas:\n""").strip()
lowerCAmelCase : Dict = [int(item.strip()) for item in user_input.split(""",""")]
print(F'''{dutch_national_flag_sort(unsorted)}''')
| 25 | 1 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer
class BaseStreamer:
    def put(self, value):
        """Function that is called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        # Flush the cache, if it exists.
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether `cp` is the codepoint of a CJK character."""
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: float = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
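# Typical consumption pattern for TextIteratorStreamer (a sketch: `model` and
# `tokenizer` are assumed to be an already-loaded causal LM and its tokenizer):
#
#   from threading import Thread
#
#   streamer = TextIteratorStreamer(tokenizer)
#   inputs = tokenizer(["An increasing sequence: one,"], return_tensors="pt")
#   generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=20)
#   Thread(target=model.generate, kwargs=generation_kwargs).start()
#   generated_text = ""
#   for new_text in streamer:  # blocks until new decoded text is queued
#       generated_text += new_text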
| 95 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class __SCREAMING_SNAKE_CASE( ctypes.Structure ):
# _fields is a specific attr expected by ctypes
_UpperCAmelCase = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def a_ ( ) -> Any:
"""simple docstring"""
if os.name == "nt":
snake_case__ = CursorInfo()
snake_case__ = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_A , ctypes.byref(_A ) )
snake_case__ = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(_A , ctypes.byref(_A ) )
elif os.name == "posix":
sys.stdout.write('\033[?25l' )
sys.stdout.flush()
def a_ ( ) -> Tuple:
"""simple docstring"""
if os.name == "nt":
snake_case__ = CursorInfo()
snake_case__ = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_A , ctypes.byref(_A ) )
snake_case__ = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(_A , ctypes.byref(_A ) )
elif os.name == "posix":
sys.stdout.write('\033[?25h' )
sys.stdout.flush()
@contextmanager
def a_ ( ) -> str:
"""simple docstring"""
try:
hide_cursor()
yield
finally:
show_cursor()
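# Usage sketch: hide the cursor while rendering, e.g. a menu or progress display
# (`render` here is a hypothetical callable):
#
#   with hide():
#       render()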
| 307 | 0 |
'''simple docstring'''
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
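# A quick worked check (illustrative): for f(x) = x**3, differentiate(f, 2, 1)
# evaluates f(Dual(2, [1])) = 8 + 12*E1 + 6*E2 + 1*E3 and returns duals[0] * 1! = 12,
# which matches d/dx x**3 = 3 * x**2 at x = 2.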
if __name__ == "__main__":
import doctest
doctest.testmod()
def _snake_case ( A ) -> str:
return y**2 * y**4
print(differentiate(f, 9, 2)) | 228 |
'''simple docstring'''
import numpy as np
import datasets
_DESCRIPTION = '''
Compute the Mahalanobis Distance

Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since.
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_CITATION = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_KWARGS_DESCRIPTION = '''
Args:
    X: List of datapoints to be compared with the `reference_distribution`.
    reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
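# A hand-rolled numpy sketch of the computation implemented below (matching its
# scalar-mean centering and pseudo-inverse fallback); values follow the docstring example:
if __name__ == "__main__":
    X = np.array([[0, 1]])
    reference = np.array([[0, 1], [1, 0]])
    delta = X - np.mean(reference)
    inv_cov = np.linalg.pinv(np.cov(reference.T))
    print(np.dot(np.dot(delta, inv_cov), delta.T).diagonal())  # -> [0.5]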
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist} | 228 | 1 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
lowerCAmelCase__ = DummyDataset(dataset_length)
def snake_case_ ( A_ : EvalPrediction ):
'''simple docstring'''
_lowerCamelCase : str = list(range(len(A_ ) ) )
_lowerCamelCase : List[Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
F'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}''' )
return {"success": success}
lowerCAmelCase__ = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
lowerCAmelCase__ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowerCAmelCase__ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowerCAmelCase__ = 2
lowerCAmelCase__ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowerCAmelCase__ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowerCAmelCase__ = None
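        # Note (added for clarity): the second evaluate/predict pass above repeats
        # the same ordering checks with `eval_accumulation_steps=2`, i.e. with
        # prediction tensors moved to CPU every two steps, so the distributed
        # gather path is also exercised in smaller chunks before the setting is
        # reset to None.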
| 72 |
"""simple docstring"""
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n, using a sieve of Eratosthenes."""
    # 0 marks "presumed prime"; 1 marks "composite" (and the non-primes 0 and 1).
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
| 72 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        pixel_values = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            pixel_values = self.get_overflowing_images(pixel_values, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = pixel_values

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
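
# Minimal usage sketch (added; the model id and file name are illustrative
# assumptions, not taken from this file):
#
#   from transformers import LayoutLMv3Processor
#   from PIL import Image
#
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
#   # encoding now holds input_ids, attention_mask, bbox and pixel_values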
| 350 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Plain recursion: updates largest_square_area[0] whenever a call finds a
    bigger all-ones square. No memoization, so the complexity is exponential.

    >>> largest_square_area_in_matrix_top_down_approach(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp_array(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """
    Memoized version of the recursion above; dp_array caches sub-problem results.

    >>> largest_square_area_in_matrix_top_down_approach_with_dp_array(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)

    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Bottom-up dynamic programming over a (rows+1) x (cols+1) table.

    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """
    Same bottom-up recurrence, but keeping only the current and the next row.

    >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 1]])
    2
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy, so that updates to current_row don't leak into next_row via aliasing
        next_row = current_row[:]

    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
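    # Cross-check (added; values hand-computed, not from the source): all four
    # implementations return the same side length for this 3x3 example, whose
    # largest all-ones square is 2x2.
    _mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    assert (
        largest_square_area_in_matrix_top_down_approach(3, 3, _mat)
        == largest_square_area_in_matrix_top_down_approach_with_dp_array(3, 3, _mat)
        == largest_square_area_in_matrix_bottom_up(3, 3, _mat)
        == largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, _mat)
        == 2
    )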
| 67 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
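
# Illustration (added; the token ids are recalled from the bert-base-uncased
# vocab and shown only as an example): encoding a sequence pair yields
# `[CLS] A [SEP] B [SEP]` with segment ids 0 over the first part and 1 over the
# second, which is exactly what the two helper methods above encode.
#
#   tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   enc = tok("hello", "world")
#   # enc["input_ids"]      -> [101, 7592, 102, 2088, 102]
#   # enc["token_type_ids"] -> [0, 0, 0, 1, 1]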
| 288 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
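    # Sanity checks (added, hand-computed): the sigmoid is 0.5 at zero, and the
    # cost of a near-perfect prediction is close to zero.
    assert sigmoid_function(0) == 0.5
    assert np.isclose(cost_function(np.array([1e-9, 1 - 1e-9]), np.array([0, 1])), 0, atol=1e-6)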
| 288 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["ConditionalDetrFeatureExtractor"]
a_ = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
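
# Note (added for clarity): `_import_structure` is only a mapping of submodule
# name -> exported symbols; the heavy torch/vision imports happen either under
# TYPE_CHECKING below or lazily, on first attribute access, through the
# _LazyModule installed at the bottom of the file.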
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 163 |
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats())) | 163 | 1 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
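
# Note (added for clarity, per the diffusers Safe Latent Diffusion docs):
# an `sld_guidance_scale` below 1 disables safety guidance entirely, while a
# large value such as 2000 together with the warmup/threshold/momentum settings
# steers sampling away from unsafe concepts; that contrast is what the paired
# slice assertions above compare.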
| 16 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class Swinv2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return Swinv2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
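
# Note (added for clarity): the tester above follows the standard transformers
# unit-test pattern - a deliberately tiny configuration (16-dim embeddings,
# three small stages) plus helpers that hand (config, inputs) pairs to the
# mixin-based test class below, so every model head can be exercised without
# downloading real checkpoints.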
@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 16 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
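
# Example mapping (added; hand-traced through the rules above):
#   rename_key("img_encoder.layers.0.blocks.1.norm1.weight")
#   -> "vision_model.encoder.stages.0.layers.1.layer_norm1.weight"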
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('.')
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('.')
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
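
# Illustration only (not part of the conversion script): a self-contained
# sketch of the fused-QKV split used above. The hidden size of 4 is a toy
# value; the checkpoint stacks the query, key and value projections along the
# output dimension, so three equal row blocks recover the separate matrices.
def _demo_qkv_split():
    import torch

    dim = 4  # hypothetical hidden size
    qkv_weight = torch.randn(3 * dim, dim)  # fused (3*dim, dim) projection

    q_w = qkv_weight[:dim, :]  # rows 0..dim-1       -> q_proj.weight
    k_w = qkv_weight[dim : dim * 2, :]  # rows dim..2*dim-1 -> k_proj.weight
    v_w = qkv_weight[-dim:, :]  # rows 2*dim..3*dim-1 -> v_proj.weight

    assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), qkv_weight)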
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
    image = prepare_img()
    inputs = processor(text=['a photo of a cat', 'a photo of a dog'], images=image, padding=True, return_tensors='pt')

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print('Successfully saved processor and model to', pytorch_dump_folder_path)

    if push_to_hub:
        print('Pushing to the hub...')
        processor.push_to_hub(model_name, organization='nielsr')
        model.push_to_hub(model_name, organization='nielsr')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 182 |
"""simple docstring"""
import argparse
import datetime
def zeller(date_input: str) -> str:
    # Days of the week for response
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError('Must be 10 characters long')

    # Get month
    m: int = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError('Month must be between 1 - 12')

    sep_1: str = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'')

    # Get day
    d: int = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError('Date must be between 1 - 31')

    # Get second separator
    sep_2: str = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'')

    # Get year
    y: int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?')

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c: int = int(str(y)[:2])
    k: int = int(str(y)[2:])
    t: int = int(2.6 * m - 5.39)
    u: int = int(c / 4)
    v: int = int(k / 4)
    x: int = int(d + k)
    z: int = int(t + u + v + x)
    w: int = int(z - (2 * c))
    f: int = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.')

    # Response
    response: str = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
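
# Quick illustrative cross-check (dates are arbitrary examples, not from the
# original script): the congruence is already validated internally against
# datetime, so any valid mm-dd-yyyy / mm/dd/yyyy string simply prints its day.
def _demo_zeller():
    for probe in ("01-31-2010", "11/26/2024"):
        print(zeller(probe))  # e.g. "Your date 01-31-2010, is a Sunday!"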
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input) | 182 | 1 |
"""simple docstring"""
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
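
# Illustration only (toy keys, not real checkpoint entries): the remapping
# above renames parameters purely by position, zipping the source model's key
# order against the Hugging Face model's key order. That assumes both modules
# register parameters in the same order, which holds for this pair but is
# worth keeping in mind before reusing the trick elsewhere.
def _demo_positional_remap():
    src = {"enc.w": 1, "enc.b": 2}
    dst_keys = ["encoder.weight", "encoder.bias"]
    mapping = dict(zip(src.keys(), dst_keys))
    renamed = {mapping[k]: v for k, v in src.items()}
    assert renamed == {"encoder.weight": 1, "encoder.bias": 2}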
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 217 |
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
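
# Usage sketch (illustration only): copy() deep-copies every field, so
# mutating a mutable member on the clone never leaks back into the original.
def _demo_download_config_copy():
    cfg = DownloadConfig(proxies={"https": "http://localhost:8080"})
    clone = cfg.copy()
    clone.proxies["https"] = "http://example:9999"
    assert cfg.proxies["https"] == "http://localhost:8080"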
| 217 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
                 pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, channel_shrink_ratio=4,
                 max_2d_position_embeddings=1024, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
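
# Usage sketch (illustration only): like any PretrainedConfig subclass, the
# config round-trips through to_dict(), which is what save_pretrained writes.
def _demo_lilt_config():
    config = LiltConfig(channel_shrink_ratio=2)
    assert config.to_dict()["channel_shrink_ratio"] == 2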
| 355 |
"""simple docstring"""
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self):
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other):
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self):
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent,
                )
            )
        return successors

    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
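
# Micro-demo (toy coordinates, separate from the grid above) of the ordering
# that drives the search: nodes compare by f_cost via __lt__, so after sort()
# the pop(0) always expands the node whose Manhattan estimate to the goal is
# smallest — g_cost plays no role in the ordering, which is what makes this
# greedy best-first rather than A*.
def _demo_node_ordering():
    a = Node(0, 0, 2, 2, 0, None)  # heuristic |0-2| + |0-2| = 4
    b = Node(1, 1, 2, 2, 0, None)  # heuristic |1-2| + |1-2| = 2
    assert sorted([a, b])[0] is b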
| 80 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True,
                 size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.5, 0.5, 0.5],
                 image_std=[0.5, 0.5, 0.5], do_pad=True):
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
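
# Standalone arithmetic sketch (toy 600x400 image with shortest_edge=18;
# values are illustrative, not from a real test) of the aspect-preserving
# resize rule the helper above mirrors: the shorter side is pinned to
# `shortest_edge` and the longer side is scaled to keep the aspect ratio.
def _demo_expected_resize():
    w, h = 600, 400
    shortest_edge = 18
    if w > h:
        expected_height, expected_width = shortest_edge, int(shortest_edge * w / h)
    else:
        expected_height, expected_width = int(shortest_edge * h / w), shortest_edge
    assert (expected_height, expected_width) == (18, 27)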
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'rescale_factor'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'image_id': 39769, 'annotations': target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50')
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}

        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')

        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
| 290 | """simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}

    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job['name']: job['html_url'] for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'&page={i + 2}', headers=headers).json()
            job_links.update({job['name']: job['html_url'] for job in result['jobs']})

        return job_links
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}')

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run"""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}

    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'&page={i + 2}', headers=headers).json()
            artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})

        return artifacts
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}')

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Action artifact from a URL (requires a redirect first)."""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers['Location']
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f'{artifact_name}.zip')
    with open(file_path, 'wb') as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)"""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode('UTF-8').strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(': ')]
                                    error = line[line.index(': ') + len(': ') :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith('FAILED '):
                                # `test` is the test method that failed
                                test = line[len('FAILED ') :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f'`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` '
            f'and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'
            ' problem.')

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files"""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith('.zip')]

    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors
def reduce_by_error(logs, error_filter=None):
    """count each error"""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method path"""
    test = test.split('::')[0]
    if test.startswith('tests/models/'):
        test = test.split('/')[2]
    else:
        test = None

    return test


def reduce_by_model(logs, error_filter=None):
    """count each error per model"""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {'count': n_errors, 'errors': error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = '| no. | error | status |'
    sep = '|-:|:-|:-|'
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]['count']
        line = f'| {count} | {error[:100]} | |'
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = '| model | no. of errors | major error | count |'
    sep = '|-:|-:|-:|-:|'
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]['count']
        error, _count = list(reduced_by_model[model]['errors'].items())[0]
        line = f'| {model} | {count} | {error[:60]} | {_count} |'
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    parser.add_argument(
        '--output_dir',
        type=str,
        required=True,
        help='Where to store the downloaded artifacts and other result files.',
    )
    parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(' / ')
                k = k[index + len(' / ') :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s2)
| 290 | 1 |
import os
import time
import numpy as np
import onnxruntime as ort
# TensorRT INT8 settings; these environment-variable names are reconstructed
# from the values below and may need adjusting for other setups.
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"

sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for iter in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
| 353 |
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread('image_data/lena.jpg', 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
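
# Self-contained sanity check (synthetic 4x4 image, no file I/O; illustration
# only): downscaling to 2x2 gives ratio 2, so the resizer samples source
# pixels (0,0), (0,2), (2,0) and (2,2) — exactly the nearest-neighbour rule.
def _demo_nearest_neighbour():
    toy = np.arange(16, dtype=np.uint8).reshape(4, 4, 1).repeat(3, axis=2)
    small = NearestNeighbour(toy, 2, 2)
    small.process()
    assert small.output[0][0][0] == 0  # source pixel (0, 0)
    assert small.output[1][1][0] == 10  # source pixel (2, 2) -> value 2*4+2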
| 47 | 0 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Apply an X gate to two qubits and measure them."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
| 154 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 154 | 1 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__snake_case =["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Tuple=1 ) -> Optional[Any]:
lowerCAmelCase = tokenizer
lowerCAmelCase = dataset
lowerCAmelCase = len(UpperCAmelCase__ ) if n_tasks is None else n_tasks
lowerCAmelCase = n_copies
def __iter__( self : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCAmelCase = self.tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict ) -> str:
lowerCAmelCase = start_length
lowerCAmelCase = eof_strings
lowerCAmelCase = tokenizer
def __call__( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , **UpperCAmelCase__ : Any ) -> Union[str, Any]:
lowerCAmelCase = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCAmelCase = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(UpperCAmelCase__ )
def a_ ( lowerCamelCase : Optional[int] ):
lowerCAmelCase = re.split('(%s)' % '|'.join(lowerCamelCase ) , lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple codes for each task in the dataset."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch['ids'].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch['ids'][:, : batch['input_len']], num_return_sequences=batch_size, **gen_kwargs)
            # each task is generated batch_size times
            generated_tasks = batch['task_id'].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = 'false'

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        'do_sample': args.do_sample,
        'temperature': args.temperature,
        'max_new_tokens': args.max_new_tokens,
        'top_p': args.top_p,
        'top_k': args.top_k,
        'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset('openai_humaneval')
    code_eval_metric = load_metric('code_eval')

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['test'])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval['test'], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[''], predictions=[['']])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            ' flag to enable code evaluation.')
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs)

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval['test'][task]['test']
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append('\n' + test_func + '\n' + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers)
        print(f'Results: {pass_at_k}')

        # Save results to json file
        with open(args.output_file, 'w') as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 55 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    class_ = 'My(6px) Pos(r) smartphone_Mt(6px)'
    return soup.find('div', class_=class_).find('span').text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 55 | 1 |
"""simple docstring"""
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(self, vocab_size=30522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512,
                 enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2,
                 **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 183 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
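
# Quick shape check (toy 2x3 shape, illustration only) for the helper above:
# it returns nested Python lists rather than arrays, so the tests can
# exercise both the list and the np.ndarray input paths.
def _demo_floats_list():
    sample = floats_list((2, 3), scale=0.5)
    assert len(sample) == 2 and len(sample[0]) == 3
    assert all(0.0 <= v < 0.5 for v in sample[0])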
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1,
                 padding_value=0.0, sampling_rate=16000, do_normalize=True, num_mel_bins=80, hop_length=16,
                 win_length=64, win_function="hann_window", fmin=80, fmax=7600, mel_floor=1e-10,
                 return_attention_mask=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors='np').input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
def UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = ['longest', 'max_length', 'do_not_pad']
lowerCamelCase_ = [None, 1600, None]
for max_length, padding in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCamelCase_ = feat_extract(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , return_tensors='np' )
lowerCamelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase ( self : Dict ) -> Dict:
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = range(800 , 1400 , 200 )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in lengths]
lowerCamelCase_ = ['longest', 'max_length', 'do_not_pad']
lowerCamelCase_ = [None, 1600, None]
for max_length, padding in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCamelCase_ = feat_extract(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase ( self : Tuple ) -> Optional[int]:
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = feat_extract(
__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=1000 , padding='max_length' , return_tensors='np' )
lowerCamelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def UpperCamelCase ( self : List[str] ) -> List[Any]:
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = feat_extract(
__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=1000 , padding='longest' , return_tensors='np' )
lowerCamelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = feat_extract(
__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=2000 , padding='longest' , return_tensors='np' )
lowerCamelCase_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
def UpperCamelCase ( self : List[Any] ) -> Tuple:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = [np.asarray(__SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
# Test feature size
lowerCamelCase_ = feature_extractor(audio_target=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
lowerCamelCase_ = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
lowerCamelCase_ = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# Test batched
lowerCamelCase_ = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='np' ).input_values
lowerCamelCase_ = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase_ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCamelCase_ = np.asarray(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='np' ).input_values
lowerCamelCase_ = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def UpperCamelCase ( self : int ) -> Union[str, Any]:
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase_ = feat_extract.model_input_names[0]
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE ) for x, y in zip(__SCREAMING_SNAKE_CASE , processed_features[input_name] ) ) )
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
lowerCamelCase_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def UpperCamelCase ( self : Tuple ) -> Any:
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase_ = feat_extract.model_input_names[0]
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
lowerCamelCase_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def UpperCamelCase ( self : List[Any] ) -> List[Any]:
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase_ = feat_extract.model_input_names[0]
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
lowerCamelCase_ = feat_extract.pad(__SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='np' )[input_name]
lowerCamelCase_ = feat_extract.pad(__SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
def UpperCamelCase ( self : Dict ) -> Union[str, Any]:
lowerCamelCase_ = self.feat_extract_dict
lowerCamelCase_ = True
lowerCamelCase_ = self.feature_extraction_class(**__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase_ = [len(__SCREAMING_SNAKE_CASE ) for x in speech_inputs]
lowerCamelCase_ = feat_extract.model_input_names[0]
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
lowerCamelCase_ = feat_extract.pad(__SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , __SCREAMING_SNAKE_CASE )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , __SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
lowerCamelCase_ = self.feat_extract_dict
lowerCamelCase_ = True
lowerCamelCase_ = self.feature_extraction_class(**__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.feat_extract_tester.prepare_inputs_for_target()
lowerCamelCase_ = [len(__SCREAMING_SNAKE_CASE ) for x in speech_inputs]
lowerCamelCase_ = feat_extract.model_input_names[0]
lowerCamelCase_ = BatchFeature({input_name: speech_inputs} )
lowerCamelCase_ = min(__SCREAMING_SNAKE_CASE )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
lowerCamelCase_ = feat_extract.pad(
__SCREAMING_SNAKE_CASE , padding='max_length' , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors='np' )
self.assertIn('attention_mask' , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 183 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
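# Usage sketch (hypothetical caller): with the lazy module registered above, importing
# the package is cheap, and each submodule is only loaded on first attribute access:
#
#   from transformers.models.poolformer import PoolFormerConfig  # triggers configuration_poolformer import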
| 358 |
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model: an empty config string falls back to the stock GPT2Config defaults
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
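# Example invocation (script filename and paths are hypothetical; point them at a real
# TensorFlow checkpoint):
#
#   python convert_gpt2_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path ./gpt2/model.ckpt \
#       --pytorch_dump_folder_path ./gpt2-pytorch
#
# Omitting --gpt2_config_file (the default "") uses GPT2Config() defaults.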
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 223 | 0 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class OffloadTester(unittest.TestCase):
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :Dict = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(__lowercase , model.state_dict())
__UpperCamelCase :str = os.path.join(__lowercase , '''index.json''')
self.assertTrue(os.path.isfile(__lowercase))
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
__UpperCamelCase :Tuple = os.path.join(__lowercase , f"""{key}.dat""")
self.assertTrue(os.path.isfile(__lowercase))
# TODO: add tests on the fact weights are properly loaded
def UpperCamelCase__ ( self) -> List[str]:
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
for dtype in dtypes:
__UpperCamelCase :List[Any] = torch.randn(2 , 3 , dtype=__lowercase)
with TemporaryDirectory() as tmp_dir:
__UpperCamelCase :Tuple = offload_weight(__lowercase , '''weight''' , __lowercase , {})
__UpperCamelCase :Optional[int] = os.path.join(__lowercase , '''weight.dat''')
self.assertTrue(os.path.isfile(__lowercase))
self.assertDictEqual(__lowercase , {'''weight''': {'''shape''': [2, 3], '''dtype''': str(__lowercase).split('''.''')[1]}})
__UpperCamelCase :List[str] = load_offloaded_weight(__lowercase , index['''weight'''])
self.assertTrue(torch.equal(__lowercase , __lowercase))
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :Optional[Any] = ModelForTest()
__UpperCamelCase :Any = model.state_dict()
__UpperCamelCase :str = {k: v for k, v in state_dict.items() if '''linear2''' not in k}
__UpperCamelCase :str = {k: v for k, v in state_dict.items() if '''linear2''' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(__lowercase , __lowercase)
__UpperCamelCase :Optional[int] = OffloadedWeightsLoader(state_dict=__lowercase , save_folder=__lowercase)
# Every key is there with the right value
self.assertEqual(sorted(__lowercase) , sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(__lowercase , weight_map[key]))
__UpperCamelCase :Union[str, Any] = {k: v for k, v in state_dict.items() if '''weight''' in k}
__UpperCamelCase :Optional[Any] = {k: v for k, v in state_dict.items() if '''weight''' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(__lowercase , __lowercase)
__UpperCamelCase :List[str] = OffloadedWeightsLoader(state_dict=__lowercase , save_folder=__lowercase)
# Every key is there with the right value
self.assertEqual(sorted(__lowercase) , sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(__lowercase , weight_map[key]))
with TemporaryDirectory() as tmp_dir:
offload_state_dict(__lowercase , __lowercase)
# Duplicates are removed
__UpperCamelCase :Tuple = OffloadedWeightsLoader(state_dict=__lowercase , save_folder=__lowercase)
# Every key is there with the right value
self.assertEqual(sorted(__lowercase) , sorted(state_dict.keys()))
for key, param in state_dict.items():
self.assertTrue(torch.allclose(__lowercase , weight_map[key]))
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :List[Any] = {'''a.1''': 0, '''a.10''': 1, '''a.2''': 2}
__UpperCamelCase :Optional[Any] = extract_submodules_state_dict(__lowercase , ['''a.1''', '''a.2'''])
self.assertDictEqual(__lowercase , {'''a.1''': 0, '''a.2''': 2})
__UpperCamelCase :List[Any] = {'''a.1.a''': 0, '''a.10.a''': 1, '''a.2.a''': 2}
__UpperCamelCase :Optional[int] = extract_submodules_state_dict(__lowercase , ['''a.1''', '''a.2'''])
self.assertDictEqual(__lowercase , {'''a.1.a''': 0, '''a.2.a''': 2})
| 43 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4,
        decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True,
        is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False,
        max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256,
        eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
        mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs,
        )
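# Quick sketch of the attribute_map above (illustrative values): PretrainedConfig
# redirects mapped attribute names, so `WhisperConfig(d_model=384).hidden_size`
# resolves to 384 via the "hidden_size" -> "d_model" entry.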
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 236 | 0 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*args):
    """print under an exclusive flock so multi-process output doesn't interleave"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*args)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 26 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 26 | 1 |
'''simple docstring'''
from manim import *
class a_ (_a ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = Rectangle(height=0.5 , width=0.5 )
_lowerCAmelCase : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
_lowerCAmelCase : List[str] = [mem.copy() for i in range(6 )]
_lowerCAmelCase : List[Any] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
_lowerCAmelCase : Optional[int] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
_lowerCAmelCase : List[Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
_lowerCAmelCase : Tuple = Text("""CPU""" , font_size=2_4 )
_lowerCAmelCase : List[str] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
_lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(1 )]
_lowerCAmelCase : Optional[Any] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
_lowerCAmelCase : List[str] = Text("""GPU""" , font_size=2_4 )
_lowerCAmelCase : Union[str, Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.align_to(snake_case_ , snake_case_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case_ )
_lowerCAmelCase : str = [mem.copy() for i in range(6 )]
_lowerCAmelCase : Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
_lowerCAmelCase : List[Any] = Text("""Model""" , font_size=2_4 )
_lowerCAmelCase : Dict = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , )
_lowerCAmelCase : Any = MarkupText(
f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=2_4 , )
_lowerCAmelCase : Tuple = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCAmelCase : Optional[Any] = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=2.5 ) , Write(snake_case_ ) , Write(snake_case_ ) )
self.add(snake_case_ )
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Union[str, Any] = []
for i, rect in enumerate(snake_case_ ):
_lowerCAmelCase : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
cpu_target.move_to(snake_case_ )
cpu_target.generate_target()
_lowerCAmelCase : Dict = 0.46 / 4
_lowerCAmelCase : Union[str, Any] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0 )
cpu_targs.append(snake_case_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case_ ) )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
| 309 |
'''simple docstring'''
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily build `value` out of `denominations` (assumed sorted ascending)."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse denominations from largest to smallest
    for denomination in reversed(denominations):
        # Take as many of this denomination as still fit
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append to the "answer" array

    return answer
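# A quick worked example of the greedy strategy (assumes denominations sorted
# ascending, as the Indian-currency default below is):
#
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#   -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]
#
# Note: greedy is optimal for canonical coin systems like this one, but not for
# arbitrary denomination sets (e.g. {1, 3, 4} with target 6: greedy gives 4+1+1,
# the optimum is 3+3).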
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 309 | 1 |
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = DownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Union[str, Any] = "down"
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
_UpperCamelCase : int = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = ResnetDownsampleBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Any = "down"
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : List[Any] = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = AttnDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Any = "down"
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : str = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = CrossAttnDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Dict = "down"
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
_UpperCamelCase : str = super().prepare_init_args_and_inputs_for_common()
_UpperCamelCase : str = 32
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
_UpperCamelCase : Optional[int] = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = SimpleCrossAttnDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Tuple = "down"
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
return super().get_dummy_input(include_encoder_hidden_states=__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
_UpperCamelCase : Dict = super().prepare_init_args_and_inputs_for_common()
_UpperCamelCase : List[Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
_UpperCamelCase : int = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = SkipDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Optional[Any] = "down"
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
return super().get_dummy_input(include_skip_sample=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
_UpperCamelCase : Optional[int] = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = AttnSkipDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Optional[int] = "down"
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
return super().get_dummy_input(include_skip_sample=__a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
_UpperCamelCase : Optional[Any] = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = DownEncoderBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Any = "down"
@property
def __SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
return super().get_dummy_input(include_temb=__a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
_UpperCamelCase : Union[str, Any] = {
"in_channels": 32,
"out_channels": 32,
}
_UpperCamelCase : List[Any] = self.dummy_input
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
_UpperCamelCase : Any = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = AttnDownEncoderBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Any = "down"
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return super().get_dummy_input(include_temb=__a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
_UpperCamelCase : Union[str, Any] = {
"in_channels": 32,
"out_channels": 32,
}
_UpperCamelCase : List[str] = self.dummy_input
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase : List[Any] = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = UNetMidBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Dict = "mid"
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
_UpperCamelCase : List[Any] = {
"in_channels": 32,
"temb_channels": 128,
}
_UpperCamelCase : List[str] = self.dummy_input
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
_UpperCamelCase : int = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = UNetMidBlockaDCrossAttn # noqa F405
SCREAMING_SNAKE_CASE__ :Optional[int] = "mid"
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Optional[Any] = super().prepare_init_args_and_inputs_for_common()
_UpperCamelCase : Union[str, Any] = 32
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
_UpperCamelCase : int = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = UNetMidBlockaDSimpleCrossAttn # noqa F405
SCREAMING_SNAKE_CASE__ :Any = "mid"
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
return super().get_dummy_input(include_encoder_hidden_states=__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
_UpperCamelCase : Tuple = super().prepare_init_args_and_inputs_for_common()
_UpperCamelCase : List[Any] = 32
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : Any = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = UpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :List[str] = "up"
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
return super().get_dummy_input(include_res_hidden_states_tuple=__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
_UpperCamelCase : Any = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = ResnetUpsampleBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Optional[Any] = "up"
@property
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
return super().get_dummy_input(include_res_hidden_states_tuple=__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
_UpperCamelCase : List[Any] = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = CrossAttnUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Optional[Any] = "up"
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
return super().get_dummy_input(include_res_hidden_states_tuple=__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
_UpperCamelCase : Dict = super().prepare_init_args_and_inputs_for_common()
_UpperCamelCase : List[Any] = 32
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
_UpperCamelCase : str = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = SimpleCrossAttnUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :int = "up"
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
return super().get_dummy_input(include_res_hidden_states_tuple=__a , include_encoder_hidden_states=__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Optional[Any] = super().prepare_init_args_and_inputs_for_common()
_UpperCamelCase : int = 32
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
_UpperCamelCase : int = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = AttnUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Optional[int] = "up"
@property
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
return super().get_dummy_input(include_res_hidden_states_tuple=__a )
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
_UpperCamelCase : Dict = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = SkipUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :List[str] = "up"
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
_UpperCamelCase : int = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = AttnSkipUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :Tuple = "up"
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
return super().get_dummy_input(include_res_hidden_states_tuple=__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
_UpperCamelCase : str = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = UpDecoderBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :int = "up"
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
return super().get_dummy_input(include_temb=__a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
_UpperCamelCase : str = {"in_channels": 32, "out_channels": 32}
_UpperCamelCase : List[str] = self.dummy_input
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
_UpperCamelCase : List[str] = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(__a )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = AttnUpDecoderBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ :List[str] = "up"
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
return super().get_dummy_input(include_temb=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
_UpperCamelCase : int = {"in_channels": 32, "out_channels": 32}
_UpperCamelCase : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
_UpperCamelCase : str = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
super().test_output(__a )
| 354 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """
    Solve X_L = 2 * pi * f * L for whichever of the three arguments is passed as 0,
    and return that quantity in a single-entry dict.
    """
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
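# Example usage (values chosen for illustration):
#
#   ind_reactance(35e-3, 1000, 0)  -> {'reactance': 219.911...}   # X_L = 2*pi*1000*0.035
#   ind_reactance(0, 10e3, 50)     -> {'inductance': 0.000795...} # L = X_L / (2*pi*f)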
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6, metadata={"help": "Percentage of patches to mask."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None, metadata={"help": "Stride to use for the encoder."}
    )
class MaskGenerator:
    """
    Generates a random boolean mask over the grid of model patches: masking is drawn on a
    coarse grid of `mask_patch_size` cells and then expanded to `model_patch_size` resolution,
    yielding a flat 0/1 tensor where 1 means "masked".
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
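# Sanity-check sketch for the generator above (assumed default sizes): with
# input_size=192, mask_patch_size=32 and model_patch_size=4 there are
# (192 // 32)**2 = 36 coarse cells, ceil(36 * 0.6) = 22 of which are masked; each
# expands to (32 // 4)**2 = 64 model patches, so the flat mask has
# (192 // 4)**2 = 2304 entries:
#
#   mask = MaskGenerator()()
#   assert mask.shape == (2304,) and mask.sum().item() == 22 * 64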
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
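# Shape sketch for the collator (assuming the 192px / patch-4 setup above): a batch of
# two examples stacks to pixel_values of shape (2, 3, 192, 192) and bool_masked_pos of
# shape (2, 2304).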
def a ( ):
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase_ , lowercase_ , lowercase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase_ , lowercase_ , lowercase_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mim''' , snake_case__ , snake_case__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase_ = training_args.get_process_log_level()
logger.setLevel(snake_case__ )
transformers.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
lowercase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")
    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"
    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )
    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )
    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Apply the transforms to a batch of images and create a corresponding patch mask."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
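# A quick sanity check of the MaskGenerator above -- a minimal sketch using
# hypothetical sizes (192-pixel input, 32-pixel mask patches, 4-pixel model patches):
# 192 // 32 = 6 mask patches per side, each covering 32 // 4 = 8 model patches,
# so the flattened mask has (192 // 4) ** 2 = 2304 entries.
gen = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
mask = gen()
assert mask.shape == (2304,)
print(mask.sum().item() / mask.numel())  # roughly mask_ratio, quantized to whole mask patches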
| 30 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
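# A minimal sketch of the staleness test above, using plain datetime objects
# instead of live GitHub issues (the 7- and 30-day thresholds mirror the script):
from datetime import datetime, timedelta

now = datetime.utcnow()
created_at = now - timedelta(days=45)
updated_at = now - timedelta(days=10)
would_close = (now - updated_at).days > 7 and (now - created_at).days >= 30
print(would_close)  # True: no activity for >7 days on an issue older than 30 days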
| 103 | 0 |
'''simple docstring'''
def merge_sort(collection):
    """Sort by repeatedly pulling the min and max of the remaining collection to the two ends."""
    start, end = [], []
    while len(collection) > 1:
        mini, maxi = min(collection), max(collection)
        start.append(mini)
        end.append(maxi)
        collection.remove(mini)
        collection.remove(maxi)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
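# For example (note the list is consumed in min/max pairs, so an odd-length
# input leaves one middle element untouched):
print(merge_sort([5, 1, 4, 2, 3]))  # [1, 2, 3, 4, 5]
print(merge_sort([9, 7]))           # [7, 9]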
| 280 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
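# A minimal single-process sketch of what pad_across_processes is being tested
# for: tensors whose first dimension differs per process are zero-padded to the
# largest size, either at the end (default) or at the front (pad_first=True).
import torch

short = torch.tensor([1, 2])  # pretend another process produced a length-3 tensor
padded_end = torch.cat([short, torch.zeros(1, dtype=short.dtype)])
padded_front = torch.cat([torch.zeros(1, dtype=short.dtype), short])
print(padded_end.tolist(), padded_front.tolist())  # [1, 2, 0] [0, 1, 2]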
| 280 | 1 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search over a slice of the list. Returns -1 if the element is not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative method of the ternary search algorithm."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive method of the ternary search algorithm."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
else:
print('Not found')
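# Quick usage of both search variants on a small sorted list:
data = [1, 3, 5, 7, 9, 11]
print(ite_ternary_search(data, 7))                    # 3
print(rec_ternary_search(0, len(data) - 1, data, 7))  # 3
print(ite_ternary_search(data, 4))                    # -1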
| 11 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
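# The _LazyModule pattern above defers heavy imports until an attribute is first
# accessed. A minimal stand-alone sketch of the same idea using PEP 562's module
# __getattr__ (not the actual _LazyModule implementation; "lazy_pkg" and
# "heavy_module" are hypothetical names):
#
# lazy_pkg/__init__.py
import importlib

_import_structure = {"heavy_module": ["HeavyClass"]}

def __getattr__(name):
    for module_name, exports in _import_structure.items():
        if name in exports:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")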
| 67 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError('''The input value cannot be less than 2''')
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'{args.num} is probably prime')
else:
        quotient = args.num // divisor
print(F'{args.num} = {divisor} * {quotient}') | 367 |
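# For instance, running the function on a small semiprime; with the default
# seed (2) and step (1) this is the classic textbook instance and finds 97:
divisor = pollard_rho(8051)  # 8051 = 83 * 97
print(divisor, 8051 // divisor)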
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    """Page replacement policy backed by a deque: the most recently used key lives at the front."""

    dq_store: deque  # Cache store of keys
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int):
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            self._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == self._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)
def __repr__( self ):
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]" | 188 | 0 |
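# Note that deque.remove is O(n); for larger caches the same policy is often
# built on collections.OrderedDict instead, as in this minimal alternative sketch:
from collections import OrderedDict

class OrderedDictLRU:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store = OrderedDict()

    def refer(self, key) -> None:
        if key in self.store:
            self.store.move_to_end(key)      # O(1) instead of deque.remove's O(n)
        elif len(self.store) == self.capacity:
            self.store.popitem(last=False)   # evict the least recently used key
        self.store[key] = True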
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of a list of numbers."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def b2mb(x):
    """Converts a byte count to megabytes."""
    return int(x / 2**20)
class TorchTracemalloc:
    """Context manager that tracks GPU memory allocated between __enter__ and __exit__."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(
    accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased", n_train: int = 320, n_val: int = 160
):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
# We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False,
    )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound", type=float, default=None, help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train", type=int, default=320, help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val", type=int, default=160, help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
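# A minimal sketch of using the TorchTracemalloc context manager above on its
# own (requires a CUDA device, since it reads torch.cuda counters):
import torch

with TorchTracemalloc() as tracemalloc:
    x = torch.randn(1024, 1024, device="cuda")
    y = x @ x
print(f"used: {tracemalloc.used} MB, peaked: {tracemalloc.peaked} MB")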
| 252 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
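# A short usage sketch of the processor above, loading a public BLIP checkpoint
# (network access assumed; the blank image is just a placeholder):
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.new("RGB", (384, 384))
inputs = processor(images=image, text="a photo of", return_tensors="pt")
print(inputs.keys())  # pixel_values plus the tokenized text fields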
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
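# And a corresponding sketch for the Whisper processor, feeding a dummy 16 kHz
# waveform (checkpoint name and the silent audio are illustrative):
import numpy as np
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = processor(audio=audio, sampling_rate=16000, return_tensors="pt")
print(inputs.input_features.shape)  # (1, 80, 3000) log-mel features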
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
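# For reference, the pipeline exercised above generates a wide panorama from a
# text prompt; a minimal end-user sketch (GPU assumed, output path illustrative):
import torch
from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

model_ckpt = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(
    model_ckpt, scheduler=scheduler, torch_dtype=torch.float16
).to("cuda")
image = pipe("a photo of the dolomites").images[0]
image.save("panorama.png")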
| 158 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
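# A short sketch of the preprocessing round trip with the image processor above
# (checkpoint name from the MobileViT family; exact output size comes from that
# checkpoint's crop_size, so it is not asserted here):
from PIL import Image
from transformers import MobileViTImageProcessor

image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-small")
image = Image.new("RGB", (500, 400))
inputs = image_processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # (1, 3, H, W) after resize + center crop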
| 158 | 1 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Finds the Jaccard similarity between two sets (or between ordered lists/tuples)."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
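    # Worked through by hand: the intersection is {'c', 'd', 'e'} (3 elements) and
    # the union has 8 elements, so the similarity is 3 / 8 = 0.375. With
    # alternative_union=True the denominator is 5 + 6 = 11 instead:
    print(jaccard_similarity(set_a, set_b, alternative_union=True))  # 3 / 11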
| 369 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)
@require_torch
    def test_small_model_pt(self):
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
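# For context, the pipeline under test is used like this (model id from the
# test above; network access and a large model download assumed):
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
print(outputs["depth"].size)             # PIL image of per-pixel depth
print(outputs["predicted_depth"].shape)  # raw depth tensor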
| 102 | 0 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __A :
"""simple docstring"""
def __init__( self , __A , __A=16 , __A=13 , __A=7 , __A=14 , __A=10 , __A=19 , __A=5 , __A=4 , __A=True , __A=16 , __A=2 , __A=4 , __A=4 , __A="gelu" , __A=0.1 , __A=0.1 , __A=[1, 2, 3, 4, 5] , __A=25 , __A=5 , ) -> Optional[Any]:
a =d_model
a =parent
a =batch_size
a =prediction_length
a =context_length
a =cardinality
a =num_time_features
a =lags_sequence
a =embedding_dimension
a =is_training
a =hidden_size
a =num_hidden_layers
a =num_attention_heads
a =intermediate_size
a =hidden_act
a =hidden_dropout_prob
a =attention_probs_dropout_prob
a =context_length
a =prediction_length + label_length
a =label_length
a =moving_average
a =autocorrelation_factor
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def SCREAMING_SNAKE_CASE ( self , __A ) -> Any:
a =config.context_length + max(config.lags_sequence )
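# the model gathers lagged copies of the series, so the "past" window must
# cover context_length plus the largest lag in lags_sequence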
a =ids_tensor([self.batch_size, 1] , config.cardinality[0] )
a =floats_tensor([self.batch_size, _past_length, config.num_time_features] )
a =floats_tensor([self.batch_size, _past_length] )
a =floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
a =floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
a =floats_tensor([self.batch_size, config.prediction_length] )
a ={
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def SCREAMING_SNAKE_CASE ( self ) -> Any:
a =self.get_config()
a =self.prepare_autoformer_inputs_dict(__A )
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a , a =self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self , __A , __A ) -> Optional[Any]:
a =AutoformerModel(config=__A ).to(__A ).eval()
a =model(**__A )
a =outputs.encoder_last_hidden_state
a =outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
a =model.get_encoder()
encoder.save_pretrained(__A )
a =AutoformerEncoder.from_pretrained(__A ).to(__A )
a , a , a , a , a =model.create_network_inputs(**__A )
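# Autoformer first decomposes the context window into a seasonal and a trend
# part; the encoder consumes the seasonal component concatenated with the
# extra features built by create_network_inputs above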
a , a =model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
a =torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
a =encoder(inputs_embeds=__A )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
a =(
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
a =torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
a =torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
a =torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
a =model.get_decoder()
decoder.save_pretrained(__A )
a =AutoformerDecoder.from_pretrained(__A ).to(__A )
a =decoder(
trend=__A , inputs_embeds=__A , encoder_hidden_states=__A , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class __A ( _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__lowerCAmelCase = (AutoformerForPrediction,) if is_torch_available() else ()
__lowerCAmelCase = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
a =AutoformerModelTester(self )
a =ConfigTester(self , config_class=__A , has_text_modality=__A )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a , a =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
a =model_class(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
a , a =model_class.from_pretrained(__A , output_loading_info=__A )
self.assertEqual(info['''missing_keys'''] , [] )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__A )
@unittest.skip(reason='''Model has no token embeddings''' )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a =inspect.signature(getattr(__A , '''forward''' ) )
# The main input is the name of the argument after `self`
a =list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __A )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a , a =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a =model_class(__A )
a =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a =[*signature.parameters.keys()]
a =[
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(__A )] , __A )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a , a =self.model_tester.prepare_config_and_inputs_for_common()
a =True
a =getattr(self.model_tester , '''seq_length''' , __A )
a =getattr(self.model_tester , '''decoder_seq_length''' , __A )
a =getattr(self.model_tester , '''encoder_seq_length''' , __A )
a =getattr(self.model_tester , '''d_model''' , __A )
a =getattr(self.model_tester , '''num_attention_heads''' , __A )
a =d_model // num_attention_heads
for model_class in self.all_model_classes:
a =True
a =False
a =True
a =model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
a =model(**self._prepare_for_class(__A , __A ) )
a =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a =True
a =model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
a =model(**self._prepare_for_class(__A , __A ) )
a =outputs.encoder_attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
a =len(__A )
a =7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__A , __A )
# decoder attentions
a =outputs.decoder_attentions
self.assertIsInstance(__A , (list, tuple) )
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
a =outputs.cross_attentions
self.assertIsInstance(__A , (list, tuple) )
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
a =True
a =True
a =model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
a =model(**self._prepare_for_class(__A , __A ) )
self.assertEqual(out_len + 2 , len(__A ) )
a =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def SCREAMING_SNAKE_CASE ( self ) -> int:
super().test_retain_grad_hidden_states_attentions()
def _A ( lowercase="train-batch.pt" ):
"""simple docstring"""
a =hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=lowercase , repo_type='''dataset''' )
a =torch.load(lowercase , map_location=torch_device )
return batch
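# the batch file is a pickled dict of tensors hosted in a hub dataset repo;
# map_location=torch_device loads it straight onto the test device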
@require_torch
@slow
class __A ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a =AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(__A )
a =prepare_batch()
with torch.no_grad():
a =model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
a =torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __A )
a =torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=__A )
self.assertTrue(torch.allclose(output[0, :3, :3] , __A , atol=__A ) )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a =AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(__A )
a =prepare_batch('''val-batch.pt''' )
with torch.no_grad():
a =model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
a =torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __A )
a =torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=__A )
self.assertTrue(torch.allclose(output[0, :3, :3] , __A , atol=__A ) )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a =AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(__A )
a =prepare_batch('''val-batch.pt''' )
with torch.no_grad():
a =model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
a =torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __A )
a =torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=__A )
a =outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __A , rtol=1E-1 ) ) | 81 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : str = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = ["""NllbTokenizerFast"""]
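# only names are registered above: under TYPE_CHECKING the real imports run for
# static analysis, while at runtime the module is swapped for a _LazyModule so
# sentencepiece/tokenizers are imported on first attribute access only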
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
lowerCamelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 81 | 1 |
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
UpperCAmelCase : int = logging.get_logger(__name__)
class lowerCamelCase__ ( __a ):
"""simple docstring"""
__a = CLIPConfig
__a = ["""CLIPEncoderLayer"""]
def __init__( self : Dict , UpperCamelCase : CLIPConfig ):
'''simple docstring'''
super().__init__(UpperCamelCase__ )
__UpperCAmelCase : Union[str, Any] = CLIPVisionModelWithProjection(config.vision_config )
__UpperCAmelCase : List[str] = nn.Linear(config.vision_config.projection_dim , 1 )
__UpperCAmelCase : Any = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def lowerCamelCase__ ( self : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Union[str, Any]=0.5 , UpperCamelCase : Tuple=0.5 ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.vision_model(UpperCamelCase__ )[0]
__UpperCAmelCase : List[Any] = self.p_head(UpperCamelCase__ )
__UpperCAmelCase : str = nsfw_detected.flatten()
__UpperCAmelCase : List[str] = nsfw_detected > p_threshold
__UpperCAmelCase : List[str] = nsfw_detected.tolist()
if any(UpperCamelCase__ ):
logger.warning(
"""Potential NSFW content was detected in one or more images. A black image will be returned instead."""
""" Try again with a different prompt and/or seed.""" )
for idx, nsfw_detected_ in enumerate(UpperCamelCase__ ):
if nsfw_detected_:
__UpperCAmelCase : List[str] = np.zeros(images[idx].shape )
__UpperCAmelCase : List[Any] = self.w_head(UpperCamelCase__ )
__UpperCAmelCase : List[str] = watermark_detected.flatten()
__UpperCAmelCase : Union[str, Any] = watermark_detected > w_threshold
__UpperCAmelCase : List[str] = watermark_detected.tolist()
if any(UpperCamelCase__ ):
logger.warning(
"""Potential watermarked content was detected in one or more images. A black image will be returned instead."""
""" Try again with a different prompt and/or seed.""" )
for idx, watermark_detected_ in enumerate(UpperCamelCase__ ):
if watermark_detected_:
__UpperCAmelCase : Tuple = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
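# Hedged usage sketch (argument order assumed, not taken from this file): the
# first input feeds the CLIP vision tower, the second is the decoded images:
#   images, nsfw_flags, wm_flags = checker(clip_pixel_values, images)
# flagged entries are returned as all-zero (black) arrays of the same shape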
| 369 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( A , unittest.TestCase ):
"""simple docstring"""
__a = LEDTokenizer
__a = LEDTokenizerFast
__a = True
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
super().setUp()
__UpperCAmelCase : Tuple = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__UpperCAmelCase : str = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
__UpperCAmelCase : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCamelCase ) )
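# the toy vocab above is byte-level BPE: "\u0120" marks a leading space and the
# merges file lists which symbol pairs fuse, e.g. "\u0120 l" -> "\u0120l"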
def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase )
def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : List[str] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase )
def lowerCamelCase__ ( self : str , UpperCamelCase : Any ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__UpperCAmelCase : Union[str, Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Any = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
__UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
@require_torch
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors="""pt""" )
self.assertIn("""input_ids""" , UpperCamelCase )
self.assertIn("""attention_mask""" , UpperCamelCase )
self.assertNotIn("""labels""" , UpperCamelCase )
self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase )
@require_torch
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Optional[Any] = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : str = tokenizer(
["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
self.assertEqual(batch.input_ids.shape , (2, 5_122) )
@require_torch
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization."""]
__UpperCAmelCase : int = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""pt""" )
__UpperCAmelCase : Tuple = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" )
__UpperCAmelCase : Optional[Any] = inputs["""input_ids"""]
__UpperCAmelCase : List[str] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Any = ["""Summary of the text.""", """Another summary."""]
__UpperCAmelCase : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
__UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , padding=UpperCamelCase )
__UpperCAmelCase : str = [[0] * len(x ) for x in encoded_output["""input_ids"""]]
__UpperCAmelCase : List[Any] = tokenizer.pad(UpperCamelCase )
self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase )
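# per the expectation above, tokenizer.pad must carry "global_attention_mask"
# through padding: 0 keeps local attention and padded slots are filled with -1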
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
__UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
__UpperCAmelCase : Any = """A, <mask> AllenNLP sentence."""
__UpperCAmelCase : Dict = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase )
__UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 320 | 0 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
_snake_case = "CompVis/stable-diffusion-v1-1"
_snake_case = "CompVis/stable-diffusion-v1-2"
_snake_case = "CompVis/stable-diffusion-v1-3"
_snake_case = "CompVis/stable-diffusion-v1-4"
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a, __a, __a, __a, __a, __a, __a = True, ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[Any] = StableDiffusionPipeline.from_pretrained(__a)
_lowerCAmelCase : List[Any] = StableDiffusionPipeline.from_pretrained(__a)
_lowerCAmelCase : str = StableDiffusionPipeline.from_pretrained(__a)
_lowerCAmelCase : str = StableDiffusionPipeline(
vae=__a, text_encoder=__a, tokenizer=__a, unet=__a, scheduler=__a, safety_checker=__a, feature_extractor=__a, requires_safety_checker=__a, )
self.register_modules(pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea)
@property
def snake_case__ ( self):
'''simple docstring'''
return {k: getattr(self, __a) for k in self.config.keys() if not k.startswith("_")}
def snake_case__ ( self, __a = "auto"):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a)
def snake_case__ ( self):
'''simple docstring'''
self.enable_attention_slicing(__a)
@torch.no_grad()
def snake_case__ ( self, __a, __a = 512, __a = 512, __a = 50, __a = 7.5, __a = None, __a = 1, __a = 0.0, __a = None, __a = None, __a = "pil", __a = True, __a = None, __a = 1, **__a, ):
'''simple docstring'''
return self.pipea(
prompt=__a, height=__a, width=__a, num_inference_steps=__a, guidance_scale=__a, negative_prompt=__a, num_images_per_prompt=__a, eta=__a, generator=__a, latents=__a, output_type=__a, return_dict=__a, callback=__a, callback_steps=__a, **__a, )
@torch.no_grad()
def snake_case__ ( self, __a, __a = 512, __a = 512, __a = 50, __a = 7.5, __a = None, __a = 1, __a = 0.0, __a = None, __a = None, __a = "pil", __a = True, __a = None, __a = 1, **__a, ):
'''simple docstring'''
return self.pipea(
prompt=__a, height=__a, width=__a, num_inference_steps=__a, guidance_scale=__a, negative_prompt=__a, num_images_per_prompt=__a, eta=__a, generator=__a, latents=__a, output_type=__a, return_dict=__a, callback=__a, callback_steps=__a, **__a, )
@torch.no_grad()
def snake_case__ ( self, __a, __a = 512, __a = 512, __a = 50, __a = 7.5, __a = None, __a = 1, __a = 0.0, __a = None, __a = None, __a = "pil", __a = True, __a = None, __a = 1, **__a, ):
'''simple docstring'''
return self.pipea(
prompt=__a, height=__a, width=__a, num_inference_steps=__a, guidance_scale=__a, negative_prompt=__a, num_images_per_prompt=__a, eta=__a, generator=__a, latents=__a, output_type=__a, return_dict=__a, callback=__a, callback_steps=__a, **__a, )
@torch.no_grad()
def snake_case__ ( self, __a, __a = 512, __a = 512, __a = 50, __a = 7.5, __a = None, __a = 1, __a = 0.0, __a = None, __a = None, __a = "pil", __a = True, __a = None, __a = 1, **__a, ):
'''simple docstring'''
return self.pipea(
prompt=__a, height=__a, width=__a, num_inference_steps=__a, guidance_scale=__a, negative_prompt=__a, num_images_per_prompt=__a, eta=__a, generator=__a, latents=__a, output_type=__a, return_dict=__a, callback=__a, callback_steps=__a, **__a, )
@torch.no_grad()
def snake_case__ ( self, __a, __a = 512, __a = 512, __a = 50, __a = 7.5, __a = None, __a = 1, __a = 0.0, __a = None, __a = None, __a = "pil", __a = True, __a = None, __a = 1, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Tuple = "cuda" if torch.cuda.is_available() else "cpu"
self.to(__a)
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")
# Get first result from Stable Diffusion Checkpoint v1.1
_lowerCAmelCase : Any = self.textaimg_sda_a(
prompt=__a, height=__a, width=__a, num_inference_steps=__a, guidance_scale=__a, negative_prompt=__a, num_images_per_prompt=__a, eta=__a, generator=__a, latents=__a, output_type=__a, return_dict=__a, callback=__a, callback_steps=__a, **__a, )
# Get first result from Stable Diffusion Checkpoint v1.2
_lowerCAmelCase : int = self.textaimg_sda_a(
prompt=__a, height=__a, width=__a, num_inference_steps=__a, guidance_scale=__a, negative_prompt=__a, num_images_per_prompt=__a, eta=__a, generator=__a, latents=__a, output_type=__a, return_dict=__a, callback=__a, callback_steps=__a, **__a, )
# Get first result from Stable Diffusion Checkpoint v1.3
_lowerCAmelCase : str = self.textaimg_sda_a(
prompt=__a, height=__a, width=__a, num_inference_steps=__a, guidance_scale=__a, negative_prompt=__a, num_images_per_prompt=__a, eta=__a, generator=__a, latents=__a, output_type=__a, return_dict=__a, callback=__a, callback_steps=__a, **__a, )
# Get first result from Stable Diffusion Checkpoint v1.4
_lowerCAmelCase : Union[str, Any] = self.textaimg_sda_a(
prompt=__a, height=__a, width=__a, num_inference_steps=__a, guidance_scale=__a, negative_prompt=__a, num_images_per_prompt=__a, eta=__a, generator=__a, latents=__a, output_type=__a, return_dict=__a, callback=__a, callback_steps=__a, **__a, )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]])
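# Hedged usage sketch (construction path assumed): the pipeline fans a single
# prompt out to the four v1.x checkpoints and stacks their first images:
#   out = pipe("an astronaut riding a horse", num_inference_steps=25)
#   v1_1, v1_2, v1_3, v1_4 = out.images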
| 36 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase__ : List[Any] =None
lowerCAmelCase__ : Optional[Any] =logging.get_logger(__name__)
lowerCAmelCase__ : Optional[int] ={'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ : int ={
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : int ={
'''facebook/nllb-large-en-ro''': 1024,
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
lowerCAmelCase__ : Dict =['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = VOCAB_FILES_NAMES
UpperCamelCase__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Dict = ['''input_ids''', '''attention_mask''']
UpperCamelCase__ : List[str] = NllbTokenizer
UpperCamelCase__ : List[int] = []
UpperCamelCase__ : List[int] = []
def __init__( self , _A=None , _A=None , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=None , _A=None , _A=None , _A=False , **_A , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
__SCREAMING_SNAKE_CASE = legacy_behaviour
super().__init__(
vocab_file=_A , tokenizer_file=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , src_lang=_A , tgt_lang=_A , additional_special_tokens=_A , legacy_behaviour=_A , **_A , )
__SCREAMING_SNAKE_CASE = vocab_file
__SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
__SCREAMING_SNAKE_CASE = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
__SCREAMING_SNAKE_CASE = {
lang_code: self.convert_tokens_to_ids(_A ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__SCREAMING_SNAKE_CASE = src_lang if src_lang is not None else 'eng_Latn'
__SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(self._src_lang )
__SCREAMING_SNAKE_CASE = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _A ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _A ( self , _A , _A = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _A ( self , _A , _A = None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _A ( self , _A , _A , _A , _A , **_A ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__SCREAMING_SNAKE_CASE = src_lang
__SCREAMING_SNAKE_CASE = self(_A , add_special_tokens=_A , return_tensors=_A , **_A )
__SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(_A )
__SCREAMING_SNAKE_CASE = tgt_lang_id
return inputs
def _A ( self , _A , _A = "eng_Latn" , _A = None , _A = "fra_Latn" , **_A , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = src_lang
__SCREAMING_SNAKE_CASE = tgt_lang
return super().prepare_seqaseq_batch(_A , _A , **_A )
def _A ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def _A ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(_A )
if self.legacy_behaviour:
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
__SCREAMING_SNAKE_CASE = [self.cur_lang_code]
__SCREAMING_SNAKE_CASE = [self.eos_token_id]
__SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
__SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
__SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(_A )
if self.legacy_behaviour:
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
__SCREAMING_SNAKE_CASE = [self.cur_lang_code]
__SCREAMING_SNAKE_CASE = [self.eos_token_id]
__SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens )
__SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens )
__SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _A ( self , _A , _A = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
__SCREAMING_SNAKE_CASE = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ):
copyfile(self.vocab_file , _A )
return (out_vocab_file,)
| 257 | 0 |
"""simple docstring"""
_a : Tuple = 9.8_06_65
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : float ,_lowerCamelCase : float = g ) -> float:
if fluid_density <= 0:
raise ValueError("""Impossible fluid density""" )
if volume < 0:
raise ValueError("""Impossible Object volume""" )
if gravity <= 0:
raise ValueError("""Impossible Gravity""" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
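# worked example: 0.1 m^3 submerged in water (1000 kg/m^3) under standard
# gravity displaces 1000 * 9.80665 * 0.1 ≈ 980.67 N of buoyant force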
| 126 | """simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
_lowerCAmelCase : Tuple = torch.nn.Linear(2 ,4 )
_lowerCAmelCase : Union[str, Any] = torch.optim.AdamW(model.parameters() ,lr=1.0 )
_lowerCAmelCase : Tuple = torch.optim.lr_scheduler.OneCycleLR(_lowerCamelCase ,max_lr=0.01 ,steps_per_epoch=2 ,epochs=1 )
_lowerCAmelCase : Tuple = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
_lowerCAmelCase : List[Any] = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> int:
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
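# the scalar "signature" above is a cheap weight fingerprint: any weight change
# is overwhelmingly likely to move it, which is enough for the save/load checks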
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Any:
_lowerCAmelCase : List[str] = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(_lowerCamelCase )
class __A ( SCREAMING_SNAKE_CASE_ ):
@require_cuda
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(a__ ):
_lowerCAmelCase : Tuple = Accelerator(cpu=a__ )
def __A ( self ):
_lowerCAmelCase : Dict = Accelerator()
_lowerCAmelCase : Any = GradientState()
assert state.num_steps == 1
_lowerCAmelCase : Optional[int] = 4
assert state.num_steps == 4
assert state.sync_gradients is True
_lowerCAmelCase : Dict = False
assert state.sync_gradients is False
GradientState._reset_state()
def __A ( self ):
_lowerCAmelCase : Optional[int] = Accelerator()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = create_components()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = accelerator.prepare(a__ , a__ , a__ , a__ , a__ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = Accelerator()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = create_components()
accelerator.prepare(a__ , a__ , a__ , a__ , a__ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def __A ( self ):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*a__ , **a__ ):
pass
with patch("""torch.cuda.set_device""" , a__ ), patch_environment(ACCELERATE_TORCH_DEVICE="""cuda:64""" ):
_lowerCAmelCase : Dict = Accelerator()
self.assertEqual(str(accelerator.state.device ) , """cuda:64""" )
def __A ( self ):
_lowerCAmelCase : Any = Accelerator()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = create_components()
accelerator.prepare(a__ , a__ , a__ , a__ , a__ )
_lowerCAmelCase : List[Any] = get_signature(a__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a__ )
# make sure random weights don't match
load_random_weights(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) < 1e-3 )
def __A ( self ):
_lowerCAmelCase : str = Accelerator()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = create_components()
accelerator.prepare(a__ , a__ , a__ , a__ , a__ )
_lowerCAmelCase : Optional[Any] = get_signature(a__ )
# saving hook
def save_config(a__ , a__ , a__ ):
_lowerCAmelCase : Dict = {"""class_name""": models[0].__class__.__name__}
with open(os.path.join(a__ , """data.json""" ) , """w""" ) as f:
json.dump(a__ , a__ )
# loading hook
def load_config(a__ , a__ ):
with open(os.path.join(a__ , """data.json""" ) , """r""" ) as f:
_lowerCAmelCase : int = json.load(a__ )
_lowerCAmelCase : str = config["""class_name"""]
_lowerCAmelCase : Union[str, Any] = accelerator.register_save_state_pre_hook(a__ )
_lowerCAmelCase : int = accelerator.register_load_state_pre_hook(a__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a__ )
# make sure random weights don't match with hooks
load_random_weights(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) > 1e-3 )
# random class name to verify correct one is loaded
_lowerCAmelCase : Dict = """random"""
# make sure loaded weights match with hooks
accelerator.load_state(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) < 1e-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a__ )
# make sure random weights don't match with hooks removed
load_random_weights(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) > 1e-3 )
# random class name to verify correct one is loaded
_lowerCAmelCase : Optional[Any] = """random"""
# make sure loaded weights match with hooks removed
accelerator.load_state(a__ )
self.assertTrue(abs(model_signature - get_signature(a__ ) ) < 1e-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = Accelerator()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = create_components()
_lowerCAmelCase : Any = None
# This should work
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = accelerator.prepare(
a__ , a__ , a__ , a__ , a__ , a__ )
self.assertTrue(dummy_obj is None )
def __A ( self ):
_lowerCAmelCase : str = Accelerator()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = create_components()
_lowerCAmelCase : Optional[int] = [1, 2, 3]
# This should work
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = accelerator.prepare(
a__ , a__ , a__ , a__ , a__ , a__ )
self.assertEqual(
getattr(a__ , """_is_accelerate_prepared""" , a__ ) , a__ , """Dummy object should have `_is_accelerate_prepared` set to `True`""" , )
self.assertEqual(
getattr(a__ , """_is_accelerate_prepared""" , a__ ) , a__ , """Model is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(a__ , """_is_accelerate_prepared""" , a__ ) , a__ , """Optimizer is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(a__ , """_is_accelerate_prepared""" , a__ ) , a__ , """Scheduler is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(a__ , """_is_accelerate_prepared""" , a__ ) , a__ , """Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(a__ , """_is_accelerate_prepared""" , a__ ) , a__ , """Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
@slow
@require_bnb
def __A ( self ):
from transformers import AutoModelForCausalLM
_lowerCAmelCase : List[str] = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=a__ , device_map={"""""": 0} , )
_lowerCAmelCase : List[str] = Accelerator()
# This should work
_lowerCAmelCase : List[Any] = accelerator.prepare(a__ )
@slow
@require_bnb
def __A ( self ):
from transformers import AutoModelForCausalLM
_lowerCAmelCase : Any = Accelerator()
with init_empty_weights():
_lowerCAmelCase : Dict = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
_lowerCAmelCase : int = infer_auto_device_map(a__ )
_lowerCAmelCase : Optional[Any] = """cpu"""
_lowerCAmelCase : Dict = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , device_map=a__ , load_in_abit=a__ , llm_inta_enable_fpaa_cpu_offload=a__ )
# This should not work and get value error
with self.assertRaises(a__ ):
_lowerCAmelCase : List[str] = accelerator.prepare(a__ )
@slow
@require_bnb
@require_multi_gpu
def __A ( self ):
from transformers import AutoModelForCausalLM
_lowerCAmelCase : Dict = {"""distributed_type""": DistributedType.MULTI_GPU}
with init_empty_weights():
_lowerCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
_lowerCAmelCase : List[str] = infer_auto_device_map(a__ )
_lowerCAmelCase : Union[str, Any] = 1
_lowerCAmelCase : int = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=a__ , device_map=a__ , )
_lowerCAmelCase : Tuple = Accelerator()
# This should not work and get value error
with self.assertRaises(a__ ):
_lowerCAmelCase : Optional[int] = accelerator.prepare(a__ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def __A ( self ):
from transformers import AutoModelForCausalLM
with init_empty_weights():
_lowerCAmelCase : Dict = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
_lowerCAmelCase : int = infer_auto_device_map(a__ )
_lowerCAmelCase : List[Any] = 1
_lowerCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=a__ , device_map=a__ , )
_lowerCAmelCase : str = Accelerator()
# This should work
_lowerCAmelCase : str = accelerator.prepare(a__ )
@require_cuda
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = torch.nn.Linear(10 , 10 )
_lowerCAmelCase : Any = torch.optim.SGD(model.parameters() , lr=0.0_1 )
_lowerCAmelCase : List[str] = Accelerator(cpu=a__ )
_lowerCAmelCase : Tuple = accelerator.prepare(a__ )
| 126 | 1 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
a_ : Optional[Any] = False
a_ : int = False
def __snake_case ( UpperCAmelCase_ : Namespace ):
return TrainCommand(UpperCAmelCase_ )
class snake_case ( lowercase ):
"""simple docstring"""
@staticmethod
def snake_case ( UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = parser.add_parser("train" , help="CLI tool to train a model on a task." )
train_parser.add_argument(
"--train_data" , type=UpperCamelCase , required=UpperCamelCase , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , )
train_parser.add_argument(
"--column_label" , type=UpperCamelCase , default=0 , help="Column of the dataset csv file with example labels." )
train_parser.add_argument(
"--column_text" , type=UpperCamelCase , default=1 , help="Column of the dataset csv file with example texts." )
train_parser.add_argument(
"--column_id" , type=UpperCamelCase , default=2 , help="Column of the dataset csv file with example ids." )
train_parser.add_argument(
"--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." )
train_parser.add_argument("--validation_data" , type=UpperCamelCase , default="" , help="path to validation dataset." )
train_parser.add_argument(
"--validation_split" , type=UpperCamelCase , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , )
train_parser.add_argument("--output" , type=UpperCamelCase , default="./" , help="path to saved the trained model." )
train_parser.add_argument(
"--task" , type=UpperCamelCase , default="text_classification" , help="Task to train the model on." )
train_parser.add_argument(
"--model" , type=UpperCamelCase , default="bert-base-uncased" , help="Model's name or path to stored model." )
train_parser.add_argument("--train_batch_size" , type=UpperCamelCase , default=32 , help="Batch size for training." )
train_parser.add_argument("--valid_batch_size" , type=UpperCamelCase , default=64 , help="Batch size for validation." )
train_parser.add_argument("--learning_rate" , type=UpperCamelCase , default=3e-5 , help="Learning rate." )
train_parser.add_argument("--adam_epsilon" , type=UpperCamelCase , default=1e-08 , help="Epsilon for Adam optimizer." )
train_parser.set_defaults(func=UpperCamelCase )
def __init__( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = logging.get_logger("transformers-cli/training" )
lowerCamelCase_ = "tf" if is_tf_available() else "torch"
os.makedirs(args.output , exist_ok=UpperCamelCase )
lowerCamelCase_ = args.output
lowerCamelCase_ = args.column_label
lowerCamelCase_ = args.column_text
lowerCamelCase_ = args.column_id
self.logger.info(f'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
lowerCamelCase_ = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'''Loading dataset from {args.train_data}''' )
lowerCamelCase_ = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
lowerCamelCase_ = None
if args.validation_data:
self.logger.info(f'''Loading validation dataset from {args.validation_data}''' )
lowerCamelCase_ = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
lowerCamelCase_ = args.validation_split
lowerCamelCase_ = args.train_batch_size
lowerCamelCase_ = args.valid_batch_size
lowerCamelCase_ = args.learning_rate
lowerCamelCase_ = args.adam_epsilon
def snake_case ( self ):
"""simple docstring"""
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def snake_case ( self ):
"""simple docstring"""
raise NotImplementedError
def snake_case ( self ):
"""simple docstring"""
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
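# Hedged CLI sketch (flags mirror the argparse definitions above):
#   transformers-cli train --train_data train.csv --task text_classification \
#       --model bert-base-uncased --output ./out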
| 55 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__lowerCAmelCase = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict['input_ids'])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype='i4')
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4')
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}')
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict['input_ids'])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4')
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}')
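    # Note (added): the two cache checks above decode step-by-step through init_cache
    # and compare the final-step logits against one full forward pass; a max absolute
    # difference below 1e-3 validates the incremental decoding path.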
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64, )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
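        # Note (added): shift_tokens_right drops the last column and prepends the
        # decoder start token (2), so exactly one trailing pad token (1) disappears,
        # hence n_pad_after == n_pad_before - 1.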
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
'''simple docstring'''
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest('JIT Enabled'):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict['input_ids'], inputs_dict['attention_mask'])
                prepared_inputs_dict = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest('JIT Enabled'):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('facebook/blenderbot-400M-distill')
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""" )
@slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
        TOK_DECODE_KW = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B', from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B')
        src_text = ['Sam']
        model_inputs = tokenizer(src_text, return_tensors='jax')
        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text | 341 | 0 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError('number of qubits must be an integer.')
    if number_of_qubits <= 0:
        raise ValueError('number of qubits must be > 0.')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('number of qubits must be exact integer.')
    if number_of_qubits > 10:
        raise ValueError('number of qubits too large to simulate(>10).')
    qr = QuantumRegister(number_of_qubits, 'qr')
    cr = ClassicalRegister(number_of_qubits, 'cr')
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(counter):
        # apply a Hadamard, then the chain of controlled phase rotations
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('qasm_simulator')
    job = execute(quantum_circuit, backend, shots=10_000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
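# Illustrative check (added): the QFT of the all-zeros input state is a uniform
# superposition, so each of the 2**3 = 8 bitstrings should appear in roughly
# 10_000 / 8 = 1250 of the shots, up to sampling noise.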
| 225 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
lowercase : Optional[Any] = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
lowercase : List[str] = {
"""RUCAIBox/mvp""": 1_0_2_4,
}
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : Dict = VOCAB_FILES_NAMES
__A : str = PRETRAINED_VOCAB_FILES_MAP
__A : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Any = ['''input_ids''', '''attention_mask''']
__A : Tuple = MvpTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, trim_offsets=True, **kwargs, ) -> None:
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if 'sep' in state:
                state['sep'] = tuple(state['sep'])
            if 'cls' in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value) -> None:
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.')
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                'to use it with pretokenized inputs.')
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
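    # Worked example (added): a single sequence A becomes <s> A </s>; a pair (A, B)
    # becomes <s> A </s> </s> B </s>, the BART-style pair format.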
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 225 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImgaImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type='text_time', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule='scaled_linear', timestep_spacing='leading', )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=32, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=False)
        text_encoder_a = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_a = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=False)
        components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.75,
}
return inputs
    def test_stable_diffusion_xl_euler(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
def lowerCamelCase__ ( self : str ):
pass
    def test_stable_diffusion_xl_img2img_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ['this is a negative prompt']
        prompt = 3 * [inputs.pop('prompt')]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
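    # Note (added): encode_prompt returns four tensors because SDXL conditions on two
    # text encoders; the pooled projection embeddings are passed alongside the
    # per-token embeddings.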
@slow
@require_torch_gpu
class snake_case_( unittest.TestCase ):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device='cpu', dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'a photograph of an astronaut riding a horse',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_default(self):
        pipe = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 60 | import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__a : int = logging.get_logger(__name__)
__a : str = {
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
__a : Optional[int] = '''deta'''
__a : Optional[int] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
    def __init__(self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function='relu', d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type='sine', num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs, ) -> None:
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def num_attention_heads(self) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
    def hidden_size(self) -> int:
'''simple docstring'''
return self.d_model
    def to_dict(self) -> dict:
'''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
return output | 210 | 0 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
        self.parent.assertTrue(hasattr(config, 'num_encoder_blocks'))
class SegformerModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128], downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True, use_labels=True, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        self.parent.assertGreater(result.loss, 0.0)
    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)
    def test_config(self):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)
    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
@unittest.skip('SegFormer does not use inputs_embeds')
    def test_inputs_embeds(self):
"""simple docstring"""
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods')
    def test_model_common_attributes(self):
"""simple docstring"""
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
for model_class in self.all_model_classes:
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = True
_UpperCAmelCase = model_class(A)
model.to(A)
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(A , A))
_UpperCAmelCase = outputs.attentions
_UpperCAmelCase = sum(self.model_tester.depths)
self.assertEqual(len(A) , A)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase = True
_UpperCAmelCase = model_class(A)
model.to(A)
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(A , A))
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(A) , A)
# verify the first attentions (first block, first layer)
_UpperCAmelCase = (self.model_tester.image_size // 4) ** 2
_UpperCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
_UpperCAmelCase = (self.model_tester.image_size // 32) ** 2
_UpperCAmelCase = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:]) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
_UpperCAmelCase = len(A)
# Check attention is always last and order is fine
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(A)
model.to(A)
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(A , A))
self.assertEqual(out_len + 1 , len(A))
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(A) , A)
# verify the first attentions (first block, first layer)
_UpperCAmelCase = (self.model_tester.image_size // 4) ** 2
_UpperCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
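        # Note (added): SegFormer's efficient self-attention shrinks the key/value
        # sequence by sr_ratio**2, which is why the last attention dimension checked
        # above is the reduced sequence length rather than the full query length.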
    def test_hidden_states_output(self):
"""simple docstring"""
def check_hidden_states_output(A : Union[str, Any] , A : List[Any] , A : List[str]):
_UpperCAmelCase = model_class(A)
model.to(A)
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(A , A))
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_encoder_blocks
self.assertEqual(len(A) , A)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(A , A , A)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(A , A , A)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
for model_class in self.all_model_classes:
if model_class in get_values(A):
continue
_UpperCAmelCase = model_class(A)
model.to(A)
model.train()
_UpperCAmelCase = self._prepare_for_class(A , A , return_labels=A)
_UpperCAmelCase = model(**A).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def _lowerCamelCase ( self : Tuple) -> List[str]:
"""simple docstring"""
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_image_segmentation_ade(self):
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
            torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_image_segmentation_city(self):
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained(
            'nvidia/segformer-b1-finetuned-cityscapes-1024-1024').to(torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
@slow
    def test_post_processing_semantic_segmentation(self):
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
            torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 290 |
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
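# Note (added): the MODP primes below are safe primes (p = 2q + 1), so the check
# pow(key, (p - 1) // 2, p) == 1 used later verifies that a public key lies in the
# prime-order subgroup.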
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError('Unsupported Group')
        self.prime = primes[group]['prime']
        self.generator = primes[group]['generator']
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check that the other party's public key is valid (NIST SP 800-56 style)
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError('Invalid public key')
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check that the other party's public key is valid (NIST SP 800-56 style)
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]['prime']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError('Invalid public key')
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
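# Usage sketch (added, illustrative): both parties must derive the same secret.
# alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
# assert alice.generate_shared_key(bob.generate_public_key()) == \
#     bob.generate_shared_key(alice.generate_public_key())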
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290 | 1 |
from __future__ import annotations
graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f'->{target_vertex}'
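# Note (added): breath_first_search visits every vertex and edge once, i.e. O(V + E);
# shortest_path then rebuilds the route by following parent pointers back to the source.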
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
| 279 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an evolved string by counting characters that match the target position-wise."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two parent strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
snake_case = list(UpperCamelCase_ )
if random.uniform(0 ,1 ) < MUTATION_PROBABILITY:
snake_case = random.choice(UpperCamelCase_ )
return "".join(UpperCamelCase_ )
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,):
"""simple docstring"""
snake_case = []
# Generate more children proportionally to the fitness score.
snake_case = int(parent_a[1] * 1_00 ) + 1
snake_case = 10 if child_n >= 10 else child_n
for _ in range(UpperCamelCase_ ):
snake_case = population_score[random.randint(0 ,UpperCamelCase_ )][0]
snake_case , snake_case = crossover(parent_a[0] ,UpperCamelCase_ )
# Append new string to the population list.
pop.append(mutate(UpperCamelCase_ ,UpperCamelCase_ ) )
pop.append(mutate(UpperCamelCase_ ,UpperCamelCase_ ) )
return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings toward the target; return (generation, total population, best string)."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #       max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
    genes_list = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
    generation, population, target = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
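    # Small isolated demo of one crossover/mutation step (a sketch, seeded for
    # reproducibility; not part of the original evolution run above):
    random.seed(0)
    demo_child_1, demo_child_2 = crossover("hello world", "HELLO WORLD")
    print(mutate(demo_child_1, genes_list), mutate(demo_child_2, genes_list))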
| 127 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
_A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/electra-small-generator": 5_12,
"google/electra-base-generator": 5_12,
"google/electra-large-generator": 5_12,
"google/electra-small-discriminator": 5_12,
"google/electra-base-discriminator": 5_12,
"google/electra-large-discriminator": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
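# Usage sketch (downloads from the Hub on first use; network access assumed):
#
#   tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   ids = tok("ELECTRA detects replaced tokens.")["input_ids"]
#   text = tok.decode(ids, skip_special_tokens=True)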
| 137 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    """Run a single operation, returning (result, exception) so both can be compared."""
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
_overwrite_items = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
_delete_items = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
_access_absent_items = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(py) == set(my)
        assert len(py) == len(my)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
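def test_basic_item_access():
    # Quick direct check mirroring the parametrized test above: the custom
    # HashMap is assumed dict-like (the tests above already rely on
    # __getitem__/__setitem__/__len__).
    hm = HashMap(initial_block_size=4)
    hm["key_a"] = "val_a"
    assert hm["key_a"] == "val_a"
    assert len(hm) == 1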
| 137 | 1 |
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual)+'E'+str(n+1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)
    __radd__ = __add__
    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)
    __rmul__ = __mul__
    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    """Return the `order`-th derivative of `func` at `position`, computed via dual numbers."""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f(y):
        return y**2 * y**4
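    # Worked check: f(y) = y**6, so f''(y) = 30 * y**4 and
    # f''(9) = 30 * 9**4 = 196830, which is the value printed below.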
    print(differentiate(f, 9, 2))
| 189 |
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """Logistic sigmoid 1 / (1 + e^-x), applied elementwise."""
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.array) -> np.array:
    """GELU approximation x * sigmoid(1.702 * x), applied elementwise."""
    return vector * sigmoid(1.702 * vector)
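# Numeric sanity check (values rounded): sigmoid(0) == 0.5, and
# gaussian_error_linear_unit(1.0) == 1 * sigmoid(1.702) ≈ 0.846.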
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 189 | 1 |
demo_graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` nodes."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest-path distance (number of edges) between `start` and `target`."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
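# Performance note: list.pop(0) is O(n) per dequeue; for larger graphs a
# collections.deque keeps each dequeue O(1). Drop-in sketch (not in the
# original):
#
#   from collections import deque
#   queue = deque([start])
#   node = queue.popleft()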
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
| 354 |
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    """Return True if the string is four dot-separated integers, each in the range 0..254."""
    octets = [int(i) for i in ip_va_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets)
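# Examples (per the 0..254 rule above):
#
#   is_ip_va_address_valid("192.168.0.23")  -> True
#   is_ip_va_address_valid("192.256.15.8")  -> False
#   is_ip_va_address_valid("not.an.ip")     -> False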
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(f'{ip} is a {valid_or_invalid} IP v4 address.')
| 250 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
@require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
@slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
@slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
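    # Usage sketch mirroring the slow tests above (Hub download assumed):
    #
    #   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
    #   batch = tokenizer(["who got the first nobel prize in physics"])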
| 45 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """Pick a representative nn.Linear from the first transformer block."""
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a low-rank adapter (LoRA); only the adapter is trained."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
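# Sketch of wrapping a frozen projection with the adapter above (shapes are
# illustrative, not from the tests):
#
#   base = nn.Linear(16, 16)
#   wrapped = LoRALayer(base, rank=4)
#   out = wrapped(torch.randn(2, 16))  # frozen base output + trainable low-rank path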
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
def UpperCAmelCase__ (self ):
super().setUp()
# Models and tokenizer
lowerCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
lowerCamelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.model_abit.config
self.assertTrue(hasattr(A , '''quantization_config''' ) )
lowerCamelCase_ : Tuple = config.to_dict()
lowerCamelCase_ : Optional[Any] = config.to_diff_dict()
lowerCamelCase_ : Any = config.to_json_string()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
lowerCamelCase_ : str = self.model_fpaa.get_memory_footprint()
lowerCamelCase_ : List[str] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
lowerCamelCase_ : Optional[int] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCAmelCase__ (self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(A , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = BitsAndBytesConfig()
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : int = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = BitsAndBytesConfig()
with self.assertRaises(A ):
lowerCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , load_in_abit=A , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(A ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : List[Any] = self.model_fpaa.to(torch.floataa )
lowerCamelCase_ : Tuple = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
lowerCamelCase_ : str = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
lowerCamelCase_ : List[Any] = self.model_fpaa.half()
# Check this does not throw an error
lowerCamelCase_ : List[str] = self.model_fpaa.float()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=A , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : List[Any] = '''t5-small'''
lowerCamelCase_ : Optional[Any] = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
lowerCamelCase_ : List[str] = AutoTokenizer.from_pretrained(cls.model_name )
lowerCamelCase_ : Optional[Any] = '''Translate in German: Hello, my dog is cute'''
def UpperCAmelCase__ (self ):
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from transformers import TaForConditionalGeneration
lowerCamelCase_ : Any = TaForConditionalGeneration._keep_in_fpaa_modules
lowerCamelCase_ : List[Any] = None
# test with `t5-small`
lowerCamelCase_ : int = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[Any] = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : int = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[int] = model.generate(**A )
lowerCamelCase_ : Any = modules
def UpperCAmelCase__ (self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowerCamelCase_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
lowerCamelCase_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Dict = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : Dict = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Tuple = model.generate(**A )
class Classes4BitModelTest(Base4bitTest):
def UpperCAmelCase__ (self ):
super().setUp()
# model_name
lowerCamelCase_ : Optional[int] = '''bigscience/bloom-560m'''
lowerCamelCase_ : Optional[int] = '''t5-small'''
# Different types of model
lowerCamelCase_ : List[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Sequence classification model
lowerCamelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=A , device_map='''auto''' )
# CausalLM model
lowerCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Seq2seq model
lowerCamelCase_ : int = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class Pipeline4BitTest(Base4bitTest):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
lowerCamelCase_ : List[str] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=A , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
lowerCamelCase_ : Any = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
lowerCamelCase_ : Any = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
class Bnb4BitTestTraining(Base4bitTest):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''facebook/opt-350m'''
super().setUp()
def UpperCAmelCase__ (self ):
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
lowerCamelCase_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
lowerCamelCase_ : List[str] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowerCamelCase_ : Optional[int] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(A ) ):
lowerCamelCase_ : Dict = LoRALayer(module.q_proj , rank=1_6 )
lowerCamelCase_ : str = LoRALayer(module.k_proj , rank=1_6 )
lowerCamelCase_ : int = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
lowerCamelCase_ : Union[str, Any] = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowerCamelCase_ : Optional[int] = model.forward(**A )
out.logits.norm().backward()
for module in model.modules():
if isinstance(A , A ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(A , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class Bnb4BitGPT2Test(Bnb4BitTest):
lowerCamelCase : Optional[Any] = "gpt2-xl"
lowerCamelCase : int = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
| 318 | 0 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
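# Instantiation sketch (hyperparameters are illustrative, not from a released
# checkpoint):
#
#   enc = SpectrogramNotesEncoder(
#       max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#       num_layers=12, num_heads=12, d_kv=64, d_ff=2048,
#       feed_forward_proj="gated-gelu",
#   )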
| 220 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
__version__ = "3.0.12"
_logger = None
def logger():
    """Return the library logger, creating it on first use."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None
    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path: str, max_length: int) -> str:
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the msvcrt.locking function to hard lock the lock file on windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)
    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)
        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the fcntl.flock to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
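# Usage sketch of the public API defined above:
#
#   lock = FileLock("resource.txt.lock", timeout=5)
#   with lock:
#       ...  # exclusive section; Timeout is raised if not acquired within 5s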
| 220 | 1 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
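    # Direct use outside the test harness (same evaluate() as above):
    #
    #   state = {}
    #   assert evaluate("x = 1 + 2", {}, state=state) == 3
    #   assert state == {"x": 3}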
| 188 |
def solution():
    """Return a * b * c for the Pythagorean triplet with a + b + c = 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
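# Reasoning: with c = 1000 - a - b the comprehension enumerates all pairs with
# a + b + c = 1000, and the filter keeps Pythagorean triples. The unique
# solution is (a, b, c) = (200, 375, 425), so solution() returns 31875000.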
if __name__ == "__main__":
print(f"""{solution() = }""")
| 188 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    """Build the (old_name, new_name) pairs used to rename checkpoint keys."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    """Split the fused qkv projection of each layer into separate query/key/value tensors."""
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the original classification head weights."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move dct[old] to dct[new]."""
    val = dct.pop(old)
    dct[new] = val
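# Quick illustration of rename_key on a plain dict (no checkpoint needed):
#
#   sd = {"transformer.norm.weight": 0}
#   rename_key(sd, "transformer.norm.weight", "vilt.layernorm.weight")
#   assert sd == {"vilt.layernorm.weight": 0}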
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url , pytorch_dump_folder_path ):
    """simple docstring"""
    config = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=False )
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = '''huggingface/label-files'''
        filename = '''vqa2-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config )
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_images = 2
        config.id2label = {0: '''False''', 1: '''True'''}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config )
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config )
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config )
    else:
        raise ValueError('''Unknown model type''' )
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''state_dict''']
    rename_keys = create_rename_keys(config , vqa_model , nlvr_model , irtr_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config )
    if mlm_model or irtr_model:
        ignore_keys = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
        for k in ignore_keys:
            state_dict.pop(k , None )
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict )
    # Define processor
    image_processor = ViltImageProcessor(size=384 )
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
    processor = ViltProcessor(image_processor , tokenizer )
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=True ).raw )
        image2 = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=True ).raw )
        text = (
            '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
            ''' standing.'''
        )
        encoding_1 = processor(image1 , text , return_tensors='''pt''' )
        encoding_2 = processor(image2 , text , return_tensors='''pt''' )
        outputs = model(
            input_ids=encoding_1.input_ids , pixel_values=encoding_1.pixel_values , pixel_values_2=encoding_2.pixel_values , )
    else:
        image = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=True ).raw )
        if mlm_model:
            text = '''a bunch of [MASK] laying on a [MASK].'''
        else:
            text = '''How many cats are there?'''
        encoding = processor(image , text , return_tensors='''pt''' )
        outputs = model(**encoding )
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 3_0522] )
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , expected_slice , atol=1E-4 )
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1 ).item()
        assert tokenizer.decode([predicted_id] ) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129] )
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041] )
        # VQA logits are 2-D ([batch, num_labels]), so the slice is taken along the label axis
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1 ).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2] )
        expected_slice = torch.tensor([-2.8721, 2.1291] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model and processor to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
snake_case : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
snake_case : Optional[Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
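# Example invocation (illustrative; the script filename and output path are placeholders,
# the two flags are the ones defined by the parser above):
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-mlm-itm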
| 366 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageClassificationPipeline( Pipeline ):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , '''vision''' )
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , top_k=None ):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        return {}, {}, postprocess_params
    def __call__( self , images , **kwargs ):
        return super().__call__(images , **kwargs )
    def preprocess( self , image ):
        image = load_image(image )
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores, ids = probs.topk(top_k )
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1 )[0]
            topk = tf.math.top_k(probs , k=top_k )
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
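def _demo_image_classification():
    # Illustrative usage only (added; not part of the original module). The checkpoint
    # name and image URL are assumptions - any image-classification model on the Hub
    # works - and the call requires network access to download the model.
    from transformers import pipeline

    classifier = pipeline("image-classification" , model="google/vit-base-patch16-224" )
    preds = classifier("http://images.cocodataset.org/val2017/000000039769.jpg" , top_k=3 )
    for pred in preds:
        print(f"{pred['label']}: {pred['score']:.3f}" )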
| 281 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 5_1_2,
"roberta-large": 5_1_2,
"roberta-large-mnli": 5_1_2,
"distilroberta-base": 5_1_2,
"roberta-base-openai-detector": 5_1_2,
"roberta-large-openai-detector": 5_1_2,
}
class RobertaTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = RobertaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> str:
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = """post_processor"""
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["""sep"""] = tuple(state["""sep"""] )
            if "cls" in state:
                state["""cls"""] = tuple(state["""cls"""] )
            changes_to_apply = False
            if state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
                state["""add_prefix_space"""] = add_prefix_space
                changes_to_apply = True
            if state.get("""trim_offsets""" , trim_offsets ) != trim_offsets:
                state["""trim_offsets"""] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("""type""" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
    def mask_token( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token( self , value ) -> Optional[int]:
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> Union[str, Any]:
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
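def _demo_roberta_special_tokens():
    # Hedged sketch (added for illustration): a single sequence is wrapped as
    # <s> ... </s>, matching build_inputs_with_special_tokens above. "roberta-base"
    # is the public checkpoint from the pretrained maps; requires network access.
    tok = RobertaTokenizerFast.from_pretrained("roberta-base" )
    ids = tok("Hello world" )["input_ids"]
    assert ids[0] == tok.bos_token_id and ids[-1] == tok.eos_token_id
    return ids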
| 109 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """gptj"""
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=5_0_4_0_0 , n_positions=2_0_4_8 , n_embd=4_0_9_6 , n_layer=2_8 , n_head=1_6 , rotary_dim=6_4 , n_inner=None , activation_function="""gelu_new""" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , tie_word_embeddings=False , **kwargs , ) ->Optional[Any]:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class GPTJOnnxConfig( OnnxConfigWithPast ):
"""simple docstring"""
    def __init__( self , config : PretrainedConfig , task : str = "default" , patching_specs : List[PatchingSpec] = None , use_past : bool = False , ) ->Optional[int]:
        """simple docstring"""
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , """pad_token_id""" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
    def inputs( self ) ->Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs
@property
    def num_layers( self ) ->int:
"""simple docstring"""
return self._config.n_layer
@property
    def num_attention_heads( self ) ->int:
"""simple docstring"""
return self._config.n_head
    def generate_dummy_inputs( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ) ->Mapping[str, Any]:
        """simple docstring"""
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
                batch, seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
return ordered_inputs
@property
    def default_onnx_opset( self ) ->int:
"""simple docstring"""
return 1_3
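def _demo_past_shape_arithmetic():
    # Worked example (added for illustration): the dummy past_key_values built above
    # have shape (batch, n_head, past_seq_len, head_dim). With the GPT-J defaults from
    # the config above (n_embd=4096, n_head=16) and arbitrary toy batch/sequence sizes:
    n_embd, n_head = 4096, 16
    batch, seqlen = 2, 8
    past_key_values_length = seqlen + 2
    head_dim = n_embd // n_head  # 256
    assert (batch, n_head, past_key_values_length, head_dim) == (2, 16, 10, 256)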
| 245 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
logger = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor( LayoutLMv2ImageProcessor ):
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            """The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use LayoutLMv2ImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 355 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ObjectDetectionPipeline( Pipeline ):
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        if self.framework == "tf":
            raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
        requires_backends(self , """vision""" )
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["""threshold"""] = kwargs["""threshold"""]
        return {}, {}, postprocess_kwargs
    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        return super().__call__(*args , **kwargs )
    def preprocess( self , image ):
        '''simple docstring'''
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors="""pt""" )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" )
        inputs["""target_size"""] = target_size
        return inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        target_size = model_inputs.pop("""target_size""" )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({"""target_size""": target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs["""bbox"""] = model_inputs["""bbox"""]
        return model_outputs
    def postprocess( self , model_outputs , threshold=0.9 ):
        '''simple docstring'''
        target_size = model_outputs["""target_size"""]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()
            def unnormalize(bbox ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ] ) )
            scores, classes = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
            keys = ["""score""", """label""", """box"""]
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["""scores"""]
            labels = raw_annotation["""labels"""]
            boxes = raw_annotation["""boxes"""]
            raw_annotation["""scores"""] = scores.tolist()
            raw_annotation["""labels"""] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["""boxes"""] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["""score""", """label""", """box"""]
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] )
            ]
        return annotation
    def _get_bounding_box( self , box : "torch.Tensor" ):
        '''simple docstring'''
        if self.framework != "pt":
            raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            """xmin""": xmin,
            """ymin""": ymin,
            """xmax""": xmax,
            """ymax""": ymax,
        }
        return bbox
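def _demo_unnormalize_bbox():
    # Worked example (added for illustration): LayoutLM-style boxes live on a 0-1000
    # grid; postprocess above rescales them to pixel coordinates. Image size is arbitrary.
    width, height = 800, 600
    bbox = [100, 200, 300, 400]  # normalized to the 0-1000 grid
    pixel_box = {
        "xmin": width * bbox[0] / 1000,   # 80.0
        "ymin": height * bbox[1] / 1000,  # 120.0
        "xmax": width * bbox[2] / 1000,   # 240.0
        "ymax": height * bbox[3] / 1000,  # 240.0
    }
    return pixel_box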
| 296 | 0 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def next_number(number: int ) -> int:
    '''simple docstring'''
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10000000
CHAINS[0] = True  # the chain starting at 1 ends in 1
CHAINS[57] = False  # the chain starting at 58 ends in 89
def chain(number: int ) -> bool:
    '''simple docstring'''
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 10000000 ) -> int:
    '''simple docstring'''
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
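def _demo_digit_square_chain():
    # Worked example (added for illustration): 86 -> 8^2 + 6^2 = 100 -> 1^2 + 0 + 0 = 1,
    # so the chain starting at 86 ends in 1 and is not counted by solution().
    assert next_number(86 ) == 100
    assert next_number(100 ) == 1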
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(F"""{solution() = }""")
| 97 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset , expected_features ):
    """simple docstring"""
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : List[str] = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TextDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , split=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_path
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [text_path]
SCREAMING_SNAKE_CASE_ : int = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[int] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
def _check_text_datasetdict(dataset_dict , expected_features , splits=("train",) ):
    """simple docstring"""
    assert isinstance(dataset_dict , DatasetDict )
for split in splits:
SCREAMING_SNAKE_CASE_ : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[Any] = TextDatasetReader({"train": text_path} , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / "cache"
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
SCREAMING_SNAKE_CASE_ : Tuple = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Dict = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader({"train": text_path} , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE_ : Optional[int] = {split: text_path}
else:
SCREAMING_SNAKE_CASE_ : List[Any] = "train"
SCREAMING_SNAKE_CASE_ : Tuple = {"train": text_path, "test": text_path}
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
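def _demo_load_text_dataset(tmp_path ):
    # Illustrative sketch (added; not one of the original tests): the public entry point
    # exercised by the tests above is `load_dataset("text", ...)`; TextDatasetReader is
    # its internal implementation. The file name here is a placeholder.
    from datasets import load_dataset

    data_file = tmp_path / "demo.txt"
    data_file.write_text("line one\nline two\n" )
    dataset = load_dataset("text" , data_files=str(data_file ) , split="train" )
    assert dataset.column_names == ["text"]
    return dataset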
| 18 | 0 |
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
| 364 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=2 , seq_length=8 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=16 , num_hidden_layers=5 , num_attention_heads=2 , intermediate_size=36 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> List[str]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ) -> Dict:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ) -> Optional[int]:
        """simple docstring"""
        return MraConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def get_pipeline_config( self ) -> Any:
        """simple docstring"""
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder( self ) -> List[Any]:
        """simple docstring"""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = MraModel(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraForMaskedLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = MraForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = MraForTokenClassification(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_choices
__SCREAMING_SNAKE_CASE = MraForMultipleChoice(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ) -> Any:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest( ModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = ()
    def setUp( self ) -> Optional[int]:
        """simple docstring"""
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=37 )
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
def UpperCAmelCase__ ( self : str ) -> Dict:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason="""MRA does not output attentions""" )
def UpperCAmelCase__ ( self : int ) -> List[Any]:
"""simple docstring"""
return
@require_torch
class MraModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
        model = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
        model = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : int ) -> str:
"""simple docstring"""
        model = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
        input_ids = torch.arange(4_096 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 4_096, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
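def _demo_mra_feature_extraction():
    # Hedged sketch (added; not one of the original tests): minimal feature extraction
    # with the public checkpoint used in the integration tests above. Requires network
    # access and the torch backend.
    model = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
    input_ids = torch.arange(256 ).unsqueeze(0 )
    with torch.no_grad():
        hidden_states = model(input_ids )[0]
    assert hidden_states.shape == (1, 256, 768)
    return hidden_states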
| 331 | 0 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict ):
    """simple docstring"""
    ignore_keys = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb ):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path ):
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path , map_location='cpu' )
    args = Namespace(**checkpoint['cfg']['model'] )
    state_dict = checkpoint['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''decoder.embed_tokens.weight'''].shape[0]
    state_dict = {key.replace('decoder' , 'model' ): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='gelu' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
    model = XGLMForCausalLM(config )
    missing = model.load_state_dict(state_dict , strict=False )
    print(missing )
    model.lm_head = make_linear_from_emb(model.model.embed_tokens )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
    parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
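# Example invocation (illustrative; the script filename and both paths are placeholders,
# the positional arguments are the ones defined by the parser above):
#   python convert_xglm_original_ckpt_to_trfms.py /path/to/fairseq/model.pt ./xglm-hf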
| 293 |
"""simple docstring"""
import random
def _partition(data , pivot ):
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element )
        elif element > pivot:
            greater.append(element )
        else:
            equal.append(element )
    return less, equal, greater
def quick_select(items , index ):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items ) or index < 0:
        return None
    pivot = items[random.randint(0 , len(items ) - 1 )]
    count = 0
    less, equal, greater = _partition(items , pivot )
    count = len(equal )
    m = len(less )
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(less , index )
    # must be in larger
    else:
        return quick_select(greater , index - (m + count) )
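def _demo_quickselect_median():
    # Worked example (added for illustration): index len(items) // 2 of the sorted
    # order is the median; quick_select finds it without fully sorting the list.
    items = [7, 1, 5, 3, 9]
    assert quick_select(items , len(items ) // 2 ) == 5  # sorted: [1, 3, 5, 7, 9]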
| 106 | 0 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path , n_shave_prefix_segments=1 ):
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split('.' )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split('.' )[:n_shave_prefix_segments] )
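def _demo_shave_segments():
    # Worked example (added for illustration): shaving the first two dotted segments
    # off a checkpoint key, as shave_segments above does.
    assert shave_segments('input_blocks.1.0.in_layers.0.weight' , n_shave_prefix_segments=2 ) == '0.in_layers.0.weight'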
def renew_resnet_paths(old_list , n_shave_prefix_segments=0 ):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('in_layers.0' , 'norm1' )
        new_item = new_item.replace('in_layers.2' , 'conv1' )
        new_item = new_item.replace('out_layers.0' , 'norm2' )
        new_item = new_item.replace('out_layers.3' , 'conv2' )
        new_item = new_item.replace('emb_layers.1' , 'time_emb_proj' )
        new_item = new_item.replace('skip_connection' , 'conv_shortcut' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'old': old_item, 'new': new_item} )
    return mapping
def renew_attention_paths(old_list , n_shave_prefix_segments=0 ):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('norm.weight' , 'group_norm.weight' )
        new_item = new_item.replace('norm.bias' , 'group_norm.bias' )
        new_item = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
        new_item = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'old': old_item, 'new': new_item} )
    return mapping
def assign_to_checkpoint(paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ):
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config['num_head_channels'] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query, key, value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map['query']] = query.reshape(target_shape )
            checkpoint[path_map['key']] = key.reshape(target_shape )
            checkpoint[path_map['value']] = value.reshape(target_shape )
    for path in paths:
        new_path = path['new']
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
        new_path = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
        new_path = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['old'] , replacement['new'] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['old']]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
lowerCAmelCase__ : Optional[Any] = {}
lowerCAmelCase__ : Union[str, Any] = checkpoint['time_embed.0.weight']
lowerCAmelCase__ : Union[str, Any] = checkpoint['time_embed.0.bias']
lowerCAmelCase__ : Dict = checkpoint['time_embed.2.weight']
lowerCAmelCase__ : Tuple = checkpoint['time_embed.2.bias']
lowerCAmelCase__ : Optional[Any] = checkpoint['input_blocks.0.0.weight']
lowerCAmelCase__ : List[str] = checkpoint['input_blocks.0.0.bias']
lowerCAmelCase__ : Tuple = checkpoint['out.0.weight']
lowerCAmelCase__ : List[str] = checkpoint['out.0.bias']
lowerCAmelCase__ : Dict = checkpoint['out.2.weight']
lowerCAmelCase__ : List[str] = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
lowerCAmelCase__ : Dict = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
lowerCAmelCase__ : List[str] = {
layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key]
for layer_id in range(SCREAMING_SNAKE_CASE_ )
}
# Retrieves the keys for the middle blocks only
lowerCAmelCase__ : List[str] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
lowerCAmelCase__ : Union[str, Any] = {
layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key]
for layer_id in range(SCREAMING_SNAKE_CASE_ )
}
# Retrieves the keys for the output blocks only
lowerCAmelCase__ : int = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
lowerCAmelCase__ : Any = {
layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key]
for layer_id in range(SCREAMING_SNAKE_CASE_ )
}
for i in range(1 , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Optional[int] = (i - 1) // (config['num_res_blocks'] + 1)
lowerCAmelCase__ : Tuple = (i - 1) % (config['num_res_blocks'] + 1)
lowerCAmelCase__ : List[Any] = [key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key]
lowerCAmelCase__ : str = [key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key]
if F'''input_blocks.{i}.0.op.weight''' in checkpoint:
lowerCAmelCase__ : Tuple = checkpoint[
F'''input_blocks.{i}.0.op.weight'''
]
lowerCAmelCase__ : Any = checkpoint[
F'''input_blocks.{i}.0.op.bias'''
]
continue
lowerCAmelCase__ : Dict = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = {'old': F'''input_blocks.{i}.0''', 'new': F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
lowerCAmelCase__ : str = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path, resnet_op] , config=SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Dict = renew_attention_paths(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = {
'old': F'''input_blocks.{i}.1''',
'new': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
lowerCAmelCase__ : List[str] = {
F'''input_blocks.{i}.1.qkv.bias''': {
'key': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'query': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'value': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''input_blocks.{i}.1.qkv.weight''': {
'key': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'query': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'value': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , attention_paths_to_split=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase__ : Optional[Any] = middle_blocks[0]
lowerCAmelCase__ : Optional[Any] = middle_blocks[1]
lowerCAmelCase__ : Union[str, Any] = middle_blocks[2]
lowerCAmelCase__ : List[str] = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Union[str, Any] = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = renew_attention_paths(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : int = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , attention_paths_to_split=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Dict = i // (config['num_res_blocks'] + 1)
lowerCAmelCase__ : Any = i % (config['num_res_blocks'] + 1)
lowerCAmelCase__ : Optional[int] = [shave_segments(SCREAMING_SNAKE_CASE_ , 2 ) for name in output_blocks[i]]
lowerCAmelCase__ : Union[str, Any] = {}
for layer in output_block_layers:
            layer_id , layer_name = layer.split('.' )[0], shave_segments(layer , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase__ : Any = [layer_name]
if len(SCREAMING_SNAKE_CASE_ ) > 1:
lowerCAmelCase__ : str = [key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key]
lowerCAmelCase__ : Any = [key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key]
lowerCAmelCase__ : Any = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[str] = renew_resnet_paths(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[str] = {'old': F'''output_blocks.{i}.0''', 'new': F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE_ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
lowerCAmelCase__ : Union[str, Any] = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
lowerCAmelCase__ : Optional[int] = checkpoint[
F'''output_blocks.{i}.{index}.conv.weight'''
]
lowerCAmelCase__ : Dict = checkpoint[
F'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(SCREAMING_SNAKE_CASE_ ) == 2:
lowerCAmelCase__ : str = []
if len(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : str = renew_attention_paths(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[Any] = {
'old': F'''output_blocks.{i}.1''',
'new': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
lowerCAmelCase__ : str = {
F'''output_blocks.{i}.1.qkv.bias''': {
'key': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'query': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'value': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''output_blocks.{i}.1.qkv.weight''': {
'key': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'query': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'value': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=SCREAMING_SNAKE_CASE_ , )
else:
lowerCAmelCase__ : List[str] = renew_resnet_paths(SCREAMING_SNAKE_CASE_ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
lowerCAmelCase__ : Any = '.'.join(['output_blocks', str(SCREAMING_SNAKE_CASE_ ), path['old']] )
lowerCAmelCase__ : Optional[int] = '.'.join(['up_blocks', str(SCREAMING_SNAKE_CASE_ ), 'resnets', str(SCREAMING_SNAKE_CASE_ ), path['new']] )
lowerCAmelCase__ : str = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
lowerCamelCase__ = json.loads(f.read())
lowerCamelCase__ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
lowerCamelCase__ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
lowerCamelCase__ = DDPMScheduler.from_config("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
lowerCamelCase__ = VQModel.from_pretrained("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
lowerCamelCase__ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path) | 362 |
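The qkv-splitting branch above reshapes a fused attention weight into per-head blocks before slicing out query, key, and value. A minimal sketch of that arithmetic, assuming `torch` and hypothetical head/channel sizes:

```python
import torch

# Hypothetical sizes; the real script derives them from the checkpoint
# shape and config["num_head_channels"].
num_heads, channels = 2, 8
qkv = torch.randn(3 * channels, channels)  # fused qkv weight: 3*channels rows

# Group rows per head, then slice each head's block into q, k, v.
per_head = qkv.reshape((num_heads, 3 * channels // num_heads) + qkv.shape[1:])
query, key, value = per_head.split(channels // num_heads, dim=1)

print(query.reshape(-1, channels).shape)  # torch.Size([8, 8])
```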
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class A__ ( unittest.TestCase ):
@parameterized.expand([(None,), ('foo.json',)] )
def _lowerCamelCase ( self : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a , config_name=a )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(a , config_name=a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = AutoConfig.from_pretrained('gpt2' )
lowerCAmelCase__ : Any = GenerationConfig.from_model_config(a )
lowerCAmelCase__ : Any = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(a , a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : Dict = {
'max_new_tokens': 1_024,
'foo': 'bar',
}
lowerCAmelCase__ : List[Any] = copy.deepcopy(a )
lowerCAmelCase__ : Dict = generation_config.update(**a )
# update_kwargs was not modified (no side effects)
self.assertEqual(a , a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(a , {'foo': 'bar'} )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : List[Any] = 'bar'
with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
generation_config.save_pretrained(a )
lowerCAmelCase__ : List[Any] = GenerationConfig.from_pretrained(a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , 'bar' )
lowerCAmelCase__ : int = GenerationConfig.from_model_config(a )
assert not hasattr(a , 'foo' ) # no new kwargs should be initialized if from config
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , a )
self.assertEqual(default_config.num_beams , 1 )
lowerCAmelCase__ : List[Any] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class A__ ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls : int ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TOKEN
HfFolder.save_token(a )
@classmethod
def _lowerCamelCase ( cls : Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('test-generation-config' , use_auth_token=self._token )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='test-generation-config' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
lowerCAmelCase__ : Dict = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='valid_org/test-generation-config-org' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : List[str] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) ) | 307 | 0 |
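For reference, the save/load round trip these tests exercise takes only a few lines with the public `GenerationConfig` API; unspecified fields fall back to their defaults on reload:

```python
import tempfile

from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    loaded = GenerationConfig.from_pretrained(tmp_dir)

assert loaded.temperature == 0.7
assert loaded.max_length == 20  # default, since it was never set
```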
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
A__ : Optional[Any] =ArgumentParser("""Transformers CLI tool""", usage="""transformers-cli <command> [<args>]""" )
A__ : List[Any] =parser.add_subparsers(help="""transformers-cli command helpers""" )
# Register commands
ConvertCommand.register_subcommand(__snake_case )
DownloadCommand.register_subcommand(__snake_case )
EnvironmentCommand.register_subcommand(__snake_case )
RunCommand.register_subcommand(__snake_case )
ServeCommand.register_subcommand(__snake_case )
UserCommands.register_subcommand(__snake_case )
AddNewModelCommand.register_subcommand(__snake_case )
AddNewModelLikeCommand.register_subcommand(__snake_case )
LfsCommands.register_subcommand(__snake_case )
PTtoTFCommand.register_subcommand(__snake_case )
# Let's go
A__ : Optional[int] =parser.parse_args()
if not hasattr(__snake_case, """func""" ):
parser.print_help()
exit(1 )
# Run
A__ : Optional[Any] =args.func(__snake_case )
service.run()
if __name__ == "__main__":
main()
| 134 |
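Each command class above follows the same `register_subcommand` contract: attach a subparser and store a callable under `func` so `main` can dispatch on it. A stripped-down sketch of that pattern with a hypothetical `run` command:

```python
from argparse import ArgumentParser

parser = ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
subcommands = parser.add_subparsers(help="demo-cli command helpers")

# A command registers its own subparser and sets `func` for dispatch.
run_parser = subcommands.add_parser("run", help="run a toy task")
run_parser.set_defaults(func=lambda args: print("running"))

args = parser.parse_args(["run"])
args.func(args)  # prints "running"
```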
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=lowercase_ ):
'''simple docstring'''
__snake_case = ['speech']
def __init__( self : Tuple , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : Dict ) -> int:
'''simple docstring'''
requires_backends(self , ["""speech"""] )
class lowerCamelCase ( metaclass=lowercase_ ):
'''simple docstring'''
__snake_case = ['speech']
def __init__( self : Union[str, Any] , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : List[str] ) -> str:
'''simple docstring'''
requires_backends(self , ["""speech"""] )
| 134 | 1 |
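The dummy classes above let the package import cleanly when the `speech` extra is missing; the failure is deferred until instantiation. A self-contained sketch of the same guard pattern (names here are illustrative, not the library's internals):

```python
def requires_backends(obj, backends):
    # Stand-in check: pretend none of the optional backends is installed.
    name = type(obj).__name__
    raise ImportError(f"{name} requires the {list(backends)} backend(s).")

class SpeechFeatureExtractor:
    """Placeholder that raises a helpful error instead of failing at import."""

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
```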
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Tuple = '''xglm'''
lowerCamelCase :Tuple = ['''past_key_values''']
lowerCamelCase :Tuple = {
'''num_attention_heads''': '''attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , lowerCAmelCase_=25_60_08 , lowerCAmelCase_=20_48 , lowerCAmelCase_=10_24 , lowerCAmelCase_=40_96 , lowerCAmelCase_=24 , lowerCAmelCase_=16 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=2 , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=2 , **lowerCAmelCase_ , ) -> List[str]:
_A = vocab_size
_A = max_position_embeddings
_A = d_model
_A = ffn_dim
_A = num_layers
_A = attention_heads
_A = activation_function
_A = dropout
_A = attention_dropout
_A = activation_dropout
_A = layerdrop
_A = init_std
_A = scale_embedding # scale factor will be sqrt(d_model) if True
_A = use_cache
super().__init__(
pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
| 360 | def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 81 | 0 |
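The `attribute_map` in the config above routes generic names onto XGLM-specific ones, which a quick instantiation makes visible; a minimal sketch assuming the public `XGLMConfig` (the defaults mirror facebook/xglm-564M):

```python
from transformers import XGLMConfig

config = XGLMConfig(d_model=256, num_layers=2, attention_heads=4, ffn_dim=512)

print(config.hidden_size)        # 256 -- routed to d_model
print(config.num_hidden_layers)  # 2   -- routed to num_layers
```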
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a__ : Dict = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
a__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 80 |
'''simple docstring'''
def sum_digits(num: int) -> int:
    '''simple docstring'''
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n: int = 100) -> int:
    '''simple docstring'''
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 80 | 1 |
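As a sanity check on the recurrence h_i = a_i * h_{i-1} + h_{i-2} for e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]: the 10th convergent is 1457/536, so the digit sum for max_n = 10 is 1 + 4 + 5 + 7 = 17. A self-contained restatement:

```python
def convergent_numerator(n: int) -> int:
    # Same recurrence as solution() above, returning the raw numerator.
    prev, cur = 1, 2
    for i in range(2, n + 1):
        a_i = 2 * i // 3 if i % 3 == 0 else 1
        prev, cur = cur, a_i * cur + prev
    return cur

assert convergent_numerator(10) == 1457  # digit sum 17
```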
from collections.abc import Generator
from math import sin
def a__ ( __UpperCamelCase ):
if len(__UpperCamelCase ) != 3_2:
raise ValueError("Input must be of length 32" )
SCREAMING_SNAKE_CASE_ = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def a__ ( __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
SCREAMING_SNAKE_CASE_ = format(__UpperCamelCase , "08x" )[-8:]
SCREAMING_SNAKE_CASE_ = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = b""
for char in message:
bit_string += format(__UpperCamelCase , "08b" ).encode("utf-8" )
SCREAMING_SNAKE_CASE_ = format(len(__UpperCamelCase ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__UpperCamelCase ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def a__ ( __UpperCamelCase ):
if len(__UpperCamelCase ) % 5_1_2 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(__UpperCamelCase ) , 5_1_2 ):
SCREAMING_SNAKE_CASE_ = bit_string[pos : pos + 5_1_2]
SCREAMING_SNAKE_CASE_ = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def a__ ( __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
SCREAMING_SNAKE_CASE_ = format(__UpperCamelCase , "032b" )
SCREAMING_SNAKE_CASE_ = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__UpperCamelCase , 2 )
def a__ ( __UpperCamelCase , __UpperCamelCase ):
return (a + b) % 2**3_2
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = preprocess(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
SCREAMING_SNAKE_CASE_ = 0X67452301
SCREAMING_SNAKE_CASE_ = 0Xefcdab89
SCREAMING_SNAKE_CASE_ = 0X98badcfe
SCREAMING_SNAKE_CASE_ = 0X10325476
SCREAMING_SNAKE_CASE_ = [
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = aa
SCREAMING_SNAKE_CASE_ = ba
SCREAMING_SNAKE_CASE_ = ca
SCREAMING_SNAKE_CASE_ = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
SCREAMING_SNAKE_CASE_ = d ^ (b & (c ^ d))
SCREAMING_SNAKE_CASE_ = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
SCREAMING_SNAKE_CASE_ = c ^ (d & (b ^ c))
SCREAMING_SNAKE_CASE_ = (5 * i + 1) % 1_6
elif i <= 4_7:
SCREAMING_SNAKE_CASE_ = b ^ c ^ d
SCREAMING_SNAKE_CASE_ = (3 * i + 5) % 1_6
else:
SCREAMING_SNAKE_CASE_ = c ^ (b | not_aa(__UpperCamelCase ))
SCREAMING_SNAKE_CASE_ = (7 * i) % 1_6
SCREAMING_SNAKE_CASE_ = (f + a + added_consts[i] + block_words[g]) % 2**3_2
SCREAMING_SNAKE_CASE_ = d
SCREAMING_SNAKE_CASE_ = c
SCREAMING_SNAKE_CASE_ = b
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , left_rotate_aa(__UpperCamelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 305 | from collections.abc import Generator
from math import sin
def a__ ( __UpperCamelCase ):
if len(__UpperCamelCase ) != 3_2:
raise ValueError("Input must be of length 32" )
SCREAMING_SNAKE_CASE_ = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def a__ ( __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
SCREAMING_SNAKE_CASE_ = format(__UpperCamelCase , "08x" )[-8:]
SCREAMING_SNAKE_CASE_ = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = b""
for char in message:
bit_string += format(__UpperCamelCase , "08b" ).encode("utf-8" )
SCREAMING_SNAKE_CASE_ = format(len(__UpperCamelCase ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__UpperCamelCase ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def a__ ( __UpperCamelCase ):
if len(__UpperCamelCase ) % 5_1_2 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(__UpperCamelCase ) , 5_1_2 ):
SCREAMING_SNAKE_CASE_ = bit_string[pos : pos + 5_1_2]
SCREAMING_SNAKE_CASE_ = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def a__ ( __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
SCREAMING_SNAKE_CASE_ = format(__UpperCamelCase , "032b" )
SCREAMING_SNAKE_CASE_ = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__UpperCamelCase , 2 )
def a__ ( __UpperCamelCase , __UpperCamelCase ):
return (a + b) % 2**3_2
def a__ ( __UpperCamelCase , __UpperCamelCase ):
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = preprocess(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
SCREAMING_SNAKE_CASE_ = 0X67452301
SCREAMING_SNAKE_CASE_ = 0Xefcdab89
SCREAMING_SNAKE_CASE_ = 0X98badcfe
SCREAMING_SNAKE_CASE_ = 0X10325476
SCREAMING_SNAKE_CASE_ = [
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = aa
SCREAMING_SNAKE_CASE_ = ba
SCREAMING_SNAKE_CASE_ = ca
SCREAMING_SNAKE_CASE_ = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
SCREAMING_SNAKE_CASE_ = d ^ (b & (c ^ d))
SCREAMING_SNAKE_CASE_ = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
SCREAMING_SNAKE_CASE_ = c ^ (d & (b ^ c))
SCREAMING_SNAKE_CASE_ = (5 * i + 1) % 1_6
elif i <= 4_7:
SCREAMING_SNAKE_CASE_ = b ^ c ^ d
SCREAMING_SNAKE_CASE_ = (3 * i + 5) % 1_6
else:
SCREAMING_SNAKE_CASE_ = c ^ (b | not_aa(__UpperCamelCase ))
SCREAMING_SNAKE_CASE_ = (7 * i) % 1_6
SCREAMING_SNAKE_CASE_ = (f + a + added_consts[i] + block_words[g]) % 2**3_2
SCREAMING_SNAKE_CASE_ = d
SCREAMING_SNAKE_CASE_ = c
SCREAMING_SNAKE_CASE_ = b
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , left_rotate_aa(__UpperCamelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = sum_aa(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase ) + reformat_hex(__UpperCamelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 305 | 1 |
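Two quick sanity checks for the logic above: the little-endian reordering applied to each 32-bit word, and the reference digest any correct MD5 must reproduce:

```python
import hashlib

# The initial state 0x67452301 serializes little-endian, i.e. byte-reversed.
assert (0x67452301).to_bytes(4, "little").hex() == "01234567"

# Cross-check value for the full pipeline.
assert hashlib.md5(b"hello world").hexdigest() == "5eb63bbbe01eeed093cb22bb8f5acdc3"
```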
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self , a__ , a__=13 , a__=32 , a__=2 , a__=3 , a__=16 , a__=[32, 64, 128] , a__=[1, 2, 1] , a__=[2, 2, 4] , a__=2 , a__=2.0 , a__=True , a__=0.0 , a__=0.0 , a__=0.1 , a__="gelu" , a__=False , a__=True , a__=0.0_2 , a__=1e-5 , a__=True , a__=None , a__=True , a__=10 , a__=8 , a__=["stage1", "stage2"] , a__=[1, 2] , ) -> Any:
'''simple docstring'''
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = embed_dim
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = num_heads
snake_case_ = window_size
snake_case_ = mlp_ratio
snake_case_ = qkv_bias
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = drop_path_rate
snake_case_ = hidden_act
snake_case_ = use_absolute_embeddings
snake_case_ = patch_norm
snake_case_ = layer_norm_eps
snake_case_ = initializer_range
snake_case_ = is_training
snake_case_ = scope
snake_case_ = use_labels
snake_case_ = type_sequence_label_size
snake_case_ = encoder_stride
snake_case_ = out_features
snake_case_ = out_indices
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = FocalNetModel(config=a__ )
model.to(a__ )
model.eval()
snake_case_ = model(a__ )
snake_case_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
snake_case_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> Dict:
'''simple docstring'''
snake_case_ = FocalNetBackbone(config=a__ )
model.to(a__ )
model.eval()
snake_case_ = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
snake_case_ = None
snake_case_ = FocalNetBackbone(config=a__ )
model.to(a__ )
model.eval()
snake_case_ = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> Optional[int]:
'''simple docstring'''
snake_case_ = FocalNetForMaskedImageModeling(config=a__ )
model.to(a__ )
model.eval()
snake_case_ = model(a__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = FocalNetForMaskedImageModeling(a__ )
model.to(a__ )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(a__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> List[str]:
'''simple docstring'''
snake_case_ = self.type_sequence_label_size
snake_case_ = FocalNetForImageClassification(a__ )
model.to(a__ )
model.eval()
snake_case_ = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = FocalNetForImageClassification(a__ )
model.to(a__ )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( lowercase_ , lowercase_ , unittest.TestCase ):
lowerCAmelCase_ : Union[str, Any] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ : Union[str, Any] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ : Tuple = False
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : Any = False
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ = FocalNetModelTester(self )
snake_case_ = ConfigTester(self , config_class=a__ , embed_dim=37 , has_text_modality=a__ )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a__ )
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
snake_case_ = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
snake_case_ = model_class(a__ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a__ )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ ) -> str:
'''simple docstring'''
snake_case_ = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(a__ , a__ ) )
snake_case_ = outputs.hidden_states
snake_case_ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a__ ) , a__ )
# FocalNet has a different seq_length
snake_case_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
snake_case_ = outputs.reshaped_hidden_states
self.assertEqual(len(a__ ) , a__ )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = reshaped_hidden_states[0].shape
snake_case_ = (
reshaped_hidden_states[0].view(a__ , a__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
snake_case_ = True
self.check_hidden_states_output(a__ , a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
self.check_hidden_states_output(a__ , a__ , a__ , a__ )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
snake_case_ = True
self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
@slow
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = FocalNetModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = _config_zero_init(a__ )
for model_class in self.all_model_classes:
snake_case_ = model_class(config=a__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(a__ )
snake_case_ = self.default_image_processor
snake_case_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
snake_case_ = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
snake_case_ = model(**a__ )
# verify the logits
snake_case_ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , a__ )
snake_case_ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class _snake_case ( lowercase_ , unittest.TestCase ):
lowerCAmelCase_ : Optional[Any] = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase_ : List[Any] = FocalNetConfig
lowerCAmelCase_ : Tuple = False
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = FocalNetModelTester(self )
| 85 |
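Outside the test harness, the slow integration path above reduces to a short inference loop; a minimal sketch where a blank image stands in for a real photo:

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, FocalNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")

image = Image.new("RGB", (224, 224))  # stand-in for a real input image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])
```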
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> None:
'''simple docstring'''
warnings.warn(
'''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use BeitImageProcessor instead.''' , SCREAMING_SNAKE_CASE_ , )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 319 | 0 |
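The deprecation shim above is a thin subclass that warns and then defers everything to the replacement class. A self-contained sketch of the pattern with hypothetical names:

```python
import warnings

class NewImageProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    """Backward-compatible alias; prefer NewImageProcessor."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated and will be removed; "
            "use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
```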
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
"""Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = "dpt"
def __init__( self : Dict , __lowerCamelCase : Any=7_68 , __lowerCamelCase : Any=12 , __lowerCamelCase : Optional[int]=12 , __lowerCamelCase : int=30_72 , __lowerCamelCase : Union[str, Any]="gelu" , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : List[str]=1e-12 , __lowerCamelCase : Dict=3_84 , __lowerCamelCase : Dict=16 , __lowerCamelCase : Dict=3 , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Dict=[2, 5, 8, 11] , __lowerCamelCase : List[str]="project" , __lowerCamelCase : Dict=[4, 2, 1, 0.5] , __lowerCamelCase : int=[96, 1_92, 3_84, 7_68] , __lowerCamelCase : int=2_56 , __lowerCamelCase : str=-1 , __lowerCamelCase : List[str]=False , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : int=0.4 , __lowerCamelCase : Optional[int]=2_55 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Any=[1, 10_24, 24, 24] , __lowerCamelCase : Any=[0, 1] , __lowerCamelCase : Tuple=None , **__lowerCamelCase : Dict , ) -> int:
super().__init__(**__lowerCamelCase )
A : str = hidden_size
A : Optional[Any] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("Initializing the config with a `BiT` backbone." )
A : Optional[int] = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
}
A : int = BitConfig(**__lowerCamelCase )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
logger.info("Initializing the config with a `BiT` backbone." )
A : str = BitConfig(**__lowerCamelCase )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
A : List[Any] = backbone_config
else:
raise ValueError(
F"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
A : Union[str, Any] = backbone_featmap_shape
A : int = neck_ignore_stages
if readout_type != "project":
raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode." )
else:
A : List[Any] = None
A : Dict = None
A : str = []
A : List[str] = num_hidden_layers
A : List[str] = num_attention_heads
A : Any = intermediate_size
A : str = hidden_act
A : int = hidden_dropout_prob
A : Dict = attention_probs_dropout_prob
A : List[Any] = initializer_range
A : str = layer_norm_eps
A : Optional[int] = image_size
A : List[str] = patch_size
A : Dict = num_channels
A : Optional[Any] = qkv_bias
A : Union[str, Any] = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']" )
A : int = readout_type
A : Union[str, Any] = reassemble_factors
A : List[str] = neck_hidden_sizes
A : List[str] = fusion_hidden_size
A : List[Any] = head_in_index
A : str = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
A : Dict = use_auxiliary_head
A : int = auxiliary_loss_weight
A : List[Any] = semantic_loss_ignore_index
A : Dict = semantic_classifier_dropout
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any:
A : Any = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
A : Union[str, Any] = self.backbone_config.to_dict()
A : Any = self.__class__.model_type
return output | 368 |
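Passing `is_hybrid=True` makes the constructor above wire in a small BiT backbone when none is given; a quick check, assuming the public `DPTConfig`:

```python
from transformers import DPTConfig

config = DPTConfig(is_hybrid=True)
print(type(config.backbone_config).__name__)  # BitConfig
print(config.backbone_config.layer_type)      # "bottleneck"
```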
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""",
datefmt="""%Y-%m-%d %H:%M:%S""",
level=os.environ.get("""LOGLEVEL""", """INFO""").upper(),
stream=sys.stdout,
)
__SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE = {"""facebook/bart-base""": BartForConditionalGeneration}
__SCREAMING_SNAKE_CASE = {"""facebook/bart-base""": BartTokenizer}
def UpperCAmelCase ( ):
A : List[Any] = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
parser.add_argument(
"--validation_file" , type=_lowerCamelCase , default=_lowerCamelCase , help="A csv or a json file containing the validation data." )
parser.add_argument(
"--max_length" , type=_lowerCamelCase , default=5 , help="The maximum total input sequence length after tokenization." , )
parser.add_argument(
"--num_beams" , type=_lowerCamelCase , default=_lowerCamelCase , help=(
"Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
) , )
parser.add_argument(
"--model_name_or_path" , type=_lowerCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models." , required=_lowerCamelCase , )
parser.add_argument(
"--config_name" , type=_lowerCamelCase , default=_lowerCamelCase , help="Pretrained config name or path if not the same as model_name" , )
parser.add_argument(
"--device" , type=_lowerCamelCase , default="cpu" , help="Device where the model will be run" , )
parser.add_argument("--output_file_path" , type=_lowerCamelCase , default=_lowerCamelCase , help="Where to store the final ONNX file." )
A : Any = parser.parse_args()
return args
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase="cpu" ):
A : int = model_dict[model_name].from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
A : List[Any] = tokenizer_dict[model_name].from_pretrained(_lowerCamelCase )
if model_name in ["facebook/bart-base"]:
A : Optional[int] = 0
A : Union[str, Any] = None
A : Optional[Any] = 0
return huggingface_model, tokenizer
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
model.eval()
A : Optional[Any] = None
A : List[Any] = torch.jit.script(BARTBeamSearchGenerator(_lowerCamelCase ) )
with torch.no_grad():
A : int = "My friends are cool but they eat too many carbs."
A : List[Any] = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="pt" ).to(model.device )
A : int = model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=_lowerCamelCase , max_length=_lowerCamelCase , early_stopping=_lowerCamelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_lowerCamelCase , (
inputs["input_ids"],
inputs["attention_mask"],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _lowerCamelCase , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={
"input_ids": {0: "batch", 1: "seq"},
"output_ids": {0: "batch", 1: "seq_out"},
} , example_outputs=_lowerCamelCase , )
logger.info("Model exported to {}".format(_lowerCamelCase ) )
A : Optional[Any] = remove_dup_initializers(os.path.abspath(_lowerCamelCase ) )
logger.info("Deduplicated and optimized model written to {}".format(_lowerCamelCase ) )
A : List[Any] = onnxruntime.InferenceSession(_lowerCamelCase )
A : Dict = ort_sess.run(
_lowerCamelCase , {
"input_ids": inputs["input_ids"].cpu().numpy(),
"attention_mask": inputs["attention_mask"].cpu().numpy(),
"num_beams": np.array(_lowerCamelCase ),
"max_length": np.array(_lowerCamelCase ),
"decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info("Model outputs from torch and ONNX Runtime are similar." )
logger.info("Success." )
def UpperCAmelCase ( ):
A : Union[str, Any] = parse_args()
A : List[Any] = 5
A : str = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
A : Union[str, Any] = torch.device(args.device )
A , A : Optional[int] = load_model_tokenizer(args.model_name_or_path , _lowerCamelCase )
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
model.to(_lowerCamelCase )
if args.max_length:
A : Optional[int] = args.max_length
if args.num_beams:
A : List[Any] = args.num_beams
if args.output_file_path:
A : int = args.output_file_path
else:
A : int = "BART.onnx"
logger.info("Exporting model to ONNX" )
export_and_validate_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
main() | 256 | 0 |
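The export call above hinges on `dynamic_axes`, which keeps the batch and sequence dimensions symbolic in the ONNX graph. The same pattern with a toy module instead of the scripted BART generator:

```python
import torch

class Toy(torch.nn.Module):
    def forward(self, x):
        return x * 2

torch.onnx.export(
    Toy(),
    (torch.randn(1, 4),),
    "toy.onnx",
    input_names=["x"],
    output_names=["y"],
    dynamic_axes={"x": {0: "batch", 1: "seq"}, "y": {0: "batch", 1: "seq"}},
)
```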
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase : str = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : int = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
UpperCAmelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 95 |
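The import-structure dance above delegates to `_LazyModule`, which defers the heavy imports until an attribute is first touched. A stripped-down sketch of the idea (not the library's actual implementation):

```python
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict) -> None:
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._targets = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr: str):
        submodule = importlib.import_module(self._targets[attr])
        return getattr(submodule, attr)

# Typical wiring: sys.modules[__name__] = LazyModule(__name__, _import_structure)
```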
import string

import numpy


def greatest_common_divisor(a: int, b: int) -> int:
    """Euclid's algorithm: greatest common divisor of a and b."""
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        """encrypt_key is an NxN numpy array"""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch

        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
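# --- usage sketch (illustrative): a 2x2 key; det([[2, 5], [1, 6]]) = 7, which is
# coprime with 36, so the key is valid and the cipher round-trips (modulo padding).
#
# cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
# ciphertext = cipher.encrypt("testmessage")
# assert cipher.decrypt(ciphertext).startswith("TESTMESSAGE")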
| 302 | 0 |
"""simple docstring"""
def lowercase ( A_ )-> list[int]:
'''simple docstring'''
a : Union[str, Any] = [0 for i in range(len(A_ ) )]
# initialize interval's left pointer and right pointer
a , a : Optional[int] = 0, 0
for i in range(1 , len(A_ ) ):
# case when current index is inside the interval
if i <= right_pointer:
a : Optional[int] = min(right_pointer - i + 1 , z_result[i - left_pointer] )
a : int = min_edge
while go_next(A_ , A_ , A_ ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
a , a : Dict = i, i + z_result[i] - 1
return z_result
def lowercase ( A_ , A_ , A_ )-> bool:
'''simple docstring'''
return i + z_result[i] < len(A_ ) and s[z_result[i]] == s[i + z_result[i]]
def lowercase ( A_ , A_ )-> int:
'''simple docstring'''
a : Tuple = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
a : Optional[Any] = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(A_ ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
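# --- usage sketch (illustrative): the concatenation pattern + text carries no separator
# character, which is why occurrences are detected with `val >= len(pattern)`.
if __name__ == "__main__":
    print(z_function("aaaa"))  # [0, 3, 2, 1]
    print(find_pattern("abr", "abracadabra"))  # 2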
| 226 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            # a single image with one shared set of candidate labels
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"])[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turn an (xmin, ymin, xmax, ymax) box tensor into a plain dict."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
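# --- usage sketch (illustrative): this pipeline is exposed through the
# "zero-shot-object-detection" task; OWL-ViT is the canonical checkpoint.
#
# from transformers import pipeline
# detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
# detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["cat", "remote"],
#     threshold=0.1,
# )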
| 226 | 1 |
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """A vertex may take `color` only if none of its neighbours already has it."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
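# --- usage sketch (illustrative): a 5-vertex adjacency matrix, 3 colors available.
if __name__ == "__main__":
    demo_graph = [
        [0, 1, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
    ]
    print(color(demo_graph, 3))  # one valid assignment, e.g. [0, 1, 0, 1, 0]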
| 80 |
from __future__ import annotations


def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's Law: given any two of voltage, current and resistance
    (pass 0 for the unknown quantity), return the missing one."""
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
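# --- usage sketch (illustrative): pass 0 for the unknown quantity.
if __name__ == "__main__":
    print(ohms_law(voltage=10, current=0, resistance=5))  # {'current': 2.0}
    print(ohms_law(voltage=0, current=2, resistance=3))   # {'voltage': 6.0}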
| 80 | 1 |
from math import pow, sqrt


def validate(*values):
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1, molar_mass_2):
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate, molar_mass_1, molar_mass_2):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate, molar_mass_1, molar_mass_2):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass, effusion_rate_1, effusion_rate_2):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass, effusion_rate_1, effusion_rate_2):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
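# --- usage sketch (illustrative): Graham's law, rate_1 / rate_2 = sqrt(M_2 / M_1);
# hydrogen (2.016 g/mol) effuses roughly four times faster than oxygen (31.998 g/mol).
if __name__ == "__main__":
    print(effusion_ratio(2.016, 31.998))  # ~3.98
    print(first_effusion_rate(1.0, 2.016, 31.998))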
| 258 |
import argparse
import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
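# --- usage sketch (illustrative; the script filename is hypothetical): dump a 6-layer
# student state dict initialized from every other BERT layer.
#
#   python extract_distilbert.py --model_type bert --model_name bert-base-uncased \
#       --dump_checkpoint serialization_dir/distilbert_init.pth --vocab_transform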
| 258 | 1 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
def lowercase ( self : str , snake_case_ : int , snake_case_ : int ):
pass
def lowercase ( self : Optional[Any] ):
pass
def lowercase ( self : int ):
pass
def lowercase ( self : List[Any] , snake_case_ : int , snake_case_ : Dict , snake_case_ : Any , snake_case_ : int , snake_case_ : List[str]=None , **snake_case_ : int ):
_UpperCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case_ , snake_case_ )
_UpperCAmelCase = TFVisionTextDualEncoderModel(snake_case_ )
_UpperCAmelCase = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def lowercase ( self : List[str] , snake_case_ : int , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Tuple=None , **snake_case_ : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase = self.get_vision_text_model(snake_case_ , snake_case_ )
_UpperCAmelCase = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
_UpperCAmelCase = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowercase ( self : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : int , snake_case_ : Tuple , snake_case_ : int , snake_case_ : Union[str, Any]=None , **snake_case_ : Any ):
_UpperCAmelCase , _UpperCAmelCase = self.get_vision_text_model(snake_case_ , snake_case_ )
_UpperCAmelCase = {"vision_model": vision_model, "text_model": text_model}
_UpperCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case_ )
_UpperCAmelCase = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
def lowercase ( self : int , snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : int=None , **snake_case_ : str ):
_UpperCAmelCase , _UpperCAmelCase = self.get_vision_text_model(snake_case_ , snake_case_ )
_UpperCAmelCase = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
_UpperCAmelCase = model(
input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ , output_attentions=snake_case_ )
_UpperCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(snake_case_ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase = to_atuple(vision_model.config.image_size )
_UpperCAmelCase = to_atuple(vision_model.config.patch_size )
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_UpperCAmelCase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_UpperCAmelCase = output.text_model_output.attentions
self.assertEqual(len(snake_case_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def lowercase ( self : Tuple ):
_UpperCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**snake_case_ )
def lowercase ( self : Any ):
_UpperCAmelCase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**snake_case_ )
def lowercase ( self : Any ):
_UpperCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**snake_case_ )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = self.prepare_config_and_inputs()
self.check_save_load(**snake_case_ )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**snake_case_ )
@slow
    def test_pretrained_model_sustains_save_load(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert" )
_UpperCAmelCase = 1_3
_UpperCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_UpperCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_UpperCAmelCase = random_attention_mask([batch_size, 4] )
_UpperCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def lowercase ( self : str , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] ):
_UpperCAmelCase = TFViTModel(snake_case_ , name="vision_model" )
_UpperCAmelCase = TFBertModel(snake_case_ , name="text_model" )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
def lowercase ( self : List[Any] ):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
_UpperCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" )
_UpperCAmelCase = 1_3
_UpperCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_UpperCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_UpperCAmelCase = random_attention_mask([batch_size, 4] )
_UpperCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def lowercase ( self : int , snake_case_ : int , snake_case_ : Dict , snake_case_ : str , snake_case_ : Tuple , snake_case_ : str=None , **snake_case_ : Tuple ):
_UpperCAmelCase , _UpperCAmelCase = self.get_vision_text_model(snake_case_ , snake_case_ )
_UpperCAmelCase = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ )
_UpperCAmelCase = model(
input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ , output_attentions=snake_case_ )
_UpperCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(snake_case_ ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_UpperCAmelCase = to_atuple(vision_model.config.image_size )
_UpperCAmelCase = to_atuple(vision_model.config.patch_size )
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_UpperCAmelCase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_UpperCAmelCase = output.text_model_output.attentions
self.assertEqual(len(snake_case_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowercase ( self : List[Any] , snake_case_ : Dict , snake_case_ : Optional[Any] ):
_UpperCAmelCase = TFDeiTModel(snake_case_ , name="vision_model" )
_UpperCAmelCase = TFRobertaModel(snake_case_ , name="text_model" )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" )
_UpperCAmelCase = 1_3
_UpperCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_UpperCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_UpperCAmelCase = random_attention_mask([batch_size, 4] )
_UpperCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def lowercase ( self : Union[str, Any] , snake_case_ : str , snake_case_ : List[Any] ):
_UpperCAmelCase = TFCLIPVisionModel(snake_case_ , name="vision_model" )
_UpperCAmelCase = TFBertModel(snake_case_ , name="text_model" )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
@slow
def lowercase ( self : int ):
_UpperCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(
"clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=snake_case_ )
_UpperCAmelCase = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_UpperCAmelCase = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=snake_case_ , padding=snake_case_ , return_tensors="np" )
_UpperCAmelCase = model(**snake_case_ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_UpperCAmelCase = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , snake_case_ , atol=1e-3 ) )
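# --- usage sketch (illustrative): the class under test pairs an arbitrary vision encoder
# with an arbitrary text encoder behind a shared projection; clip-italian is the real
# checkpoint exercised in the integration test above.
#
# from transformers import TFVisionTextDualEncoderModel, VisionTextDualEncoderProcessor
# model = TFVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", from_pt=True)
# processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")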
| 22 |
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
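# --- usage sketch (illustrative): Flax modules are initialized lazily; feature maps are
# NHWC and `temb` is a per-example time embedding (dimensions below are arbitrary).
#
# import jax
# block = FlaxDownBlock2D(in_channels=32, out_channels=64)
# x = jnp.zeros((1, 16, 16, 32))
# temb = jnp.zeros((1, 128))
# params = block.init(jax.random.PRNGKey(0), x, temb)
# hidden, skips = block.apply(params, x, temb)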
| 176 | 0 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
@classmethod
@abstractmethod
def _A ( cls , _A , **_A ):
'''simple docstring'''
...
@staticmethod
@abstractmethod
def _A ( _A , _A ):
'''simple docstring'''
...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []
@staticmethod
def _A ( _A , _A ):
'''simple docstring'''
with open(_A , 'rb' ) as f:
return f.read(_A )
@classmethod
def _A ( cls , _A , _A = b"" ):
'''simple docstring'''
if not magic_number:
__SCREAMING_SNAKE_CASE = max(len(_A ) for cls_magic_number in cls.magic_numbers )
try:
__SCREAMING_SNAKE_CASE = cls.read_magic_number(_A , _A )
except OSError:
return False
return any(magic_number.startswith(_A ) for cls_magic_number in cls.magic_numbers )
class TarExtractor(BaseExtractor):
@classmethod
def _A ( cls , _A , **_A ):
'''simple docstring'''
return tarfile.is_tarfile(_A )
@staticmethod
def _A ( _A , _A ):
'''simple docstring'''
def resolved(_A ) -> str:
return os.path.realpath(os.path.abspath(_A ) )
def badpath(_A , _A ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(_A , _A ) ).startswith(_A )
def badlink(_A , _A ) -> bool:
# Links are interpreted relative to the directory containing the link
__SCREAMING_SNAKE_CASE = resolved(os.path.join(_A , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=_A )
__SCREAMING_SNAKE_CASE = resolved(_A )
for finfo in members:
if badpath(finfo.name , _A ):
logger.error(f"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(_A , _A ):
logger.error(f"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(_A , _A ):
logger.error(f"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def _A ( _A , _A ):
'''simple docstring'''
os.makedirs(_A , exist_ok=_A )
__SCREAMING_SNAKE_CASE = tarfile.open(_A )
tar_file.extractall(_A , members=TarExtractor.safemembers(_A , _A ) )
tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]
@staticmethod
def _A ( _A , _A ):
'''simple docstring'''
with gzip.open(_A , 'rb' ) as gzip_file:
with open(_A , 'wb' ) as extracted_file:
shutil.copyfileobj(_A , _A )
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]
@classmethod
def _A ( cls , _A , _A = b"" ):
'''simple docstring'''
if super().is_extractable(_A , magic_number=_A ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(_A , 'rb' ) as fp:
__SCREAMING_SNAKE_CASE = _EndRecData(_A )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__SCREAMING_SNAKE_CASE = fp.read(_A ) # CD is where we expect it to be
if len(_A ) == sizeCentralDir:
__SCREAMING_SNAKE_CASE = struct.unpack(_A , _A ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def _A ( _A , _A ):
'''simple docstring'''
os.makedirs(_A , exist_ok=_A )
with zipfile.ZipFile(_A , 'r' ) as zip_file:
zip_file.extractall(_A )
zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def _A ( _A , _A ):
'''simple docstring'''
with lzma.open(_A ) as compressed_file:
with open(_A , 'wb' ) as extracted_file:
shutil.copyfileobj(_A , _A )
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID
@staticmethod
def _A ( _A , _A ):
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError('Please pip install rarfile' )
import rarfile
os.makedirs(_A , exist_ok=_A )
__SCREAMING_SNAKE_CASE = rarfile.RarFile(_A )
rf.extractall(_A )
rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xB5\x2F\xFD"]
@staticmethod
def _A ( _A , _A ):
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('Please pip install zstandard' )
import zstandard as zstd
__SCREAMING_SNAKE_CASE = zstd.ZstdDecompressor()
with open(_A , 'rb' ) as ifh, open(_A , 'wb' ) as ofh:
dctx.copy_stream(_A , _A )
class BzipaExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]
@staticmethod
def _A ( _A , _A ):
'''simple docstring'''
with bza.open(_A , 'rb' ) as compressed_file:
with open(_A , 'wb' ) as extracted_file:
shutil.copyfileobj(_A , _A )
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def _A ( _A , _A ):
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError('Please pip install py7zr' )
        import py7zr
os.makedirs(_A , exist_ok=_A )
        with py7zr.SevenZipFile(_A , 'r' ) as archive:
archive.extractall(_A )
class LzaExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]
@staticmethod
def _A ( _A , _A ):
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError('Please pip install lz4' )
        import lz4.frame

        with lz4.frame.open(_A , 'rb' ) as compressed_file:
with open(_A , 'wb' ) as extracted_file:
shutil.copyfileobj(_A , _A )
class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def _A ( cls ):
'''simple docstring'''
return max(
len(_A )
for extractor in cls.extractors.values()
if issubclass(_A , _A )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def _A ( _A , _A ):
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(_A , magic_number_length=_A )
except OSError:
return b""
@classmethod
def _A ( cls , _A , _A = False ):
'''simple docstring'''
warnings.warn(
'Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'infer_extractor_format\' instead.' , category=_A , )
__SCREAMING_SNAKE_CASE = cls.infer_extractor_format(_A )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def _A ( cls , _A ): # <Added version="2.4.0"/>
'''simple docstring'''
__SCREAMING_SNAKE_CASE = cls._get_magic_number_max_length()
__SCREAMING_SNAKE_CASE = cls._read_magic_number(_A , _A )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(_A , magic_number=_A ):
return extractor_format
@classmethod
def _A ( cls , _A , _A , _A = None , _A = "deprecated" , ):
'''simple docstring'''
os.makedirs(os.path.dirname(_A ) , exist_ok=_A )
# Prevent parallel extractions
__SCREAMING_SNAKE_CASE = str(Path(_A ).with_suffix('.lock' ) )
with FileLock(_A ):
shutil.rmtree(_A , ignore_errors=_A )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(_A , _A ): # passed as positional arg
warnings.warn(
'Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'extractor_format\' instead.' , category=_A , )
__SCREAMING_SNAKE_CASE = extractor if extractor != 'deprecated' else extractor_format
else:
__SCREAMING_SNAKE_CASE = cls.extractors[extractor_format]
return extractor.extract(_A , _A )
else:
warnings.warn(
'Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '
'exception in 3.0.0.' , category=_A , )
for extractor in cls.extractors.values():
if extractor.is_extractable(_A ):
return extractor.extract(_A , _A )
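# --- usage sketch (illustrative): in the upstream `datasets` library this module backs
# ExtractManager, which hashes the archive path, infers the format from magic numbers,
# and extracts into the datasets cache.
#
# from datasets.utils.extract import ExtractManager
# extracted_path = ExtractManager().extract("path/to/archive.tar.gz")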
| 118 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
def __init__( self , _A , _A , _A = None , _A = None , _A = False , **_A , ):
'''simple docstring'''
super().__init__(features=_A , cache_dir=_A , keep_in_memory=_A , **_A )
__SCREAMING_SNAKE_CASE = Sql(
cache_dir=_A , features=_A , sql=_A , con=_A , **_A , )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
self.builder.download_and_prepare(
download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , )
# Build dataset for splits
__SCREAMING_SNAKE_CASE = self.builder.as_dataset(
split='train' , verification_mode=_A , in_memory=self.keep_in_memory )
return dataset
class SqlDatasetWriter:
def __init__( self , _A , _A , _A , _A = None , _A = None , **_A , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
__SCREAMING_SNAKE_CASE = dataset
__SCREAMING_SNAKE_CASE = name
__SCREAMING_SNAKE_CASE = con
__SCREAMING_SNAKE_CASE = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__SCREAMING_SNAKE_CASE = num_proc
__SCREAMING_SNAKE_CASE = to_sql_kwargs
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.to_sql_kwargs.pop('sql' , _A )
__SCREAMING_SNAKE_CASE = self.to_sql_kwargs.pop('con' , _A )
__SCREAMING_SNAKE_CASE = self.to_sql_kwargs.pop('index' , _A )
__SCREAMING_SNAKE_CASE = self._write(index=_A , **self.to_sql_kwargs )
return written
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = args
__SCREAMING_SNAKE_CASE = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
__SCREAMING_SNAKE_CASE = query_table(
table=self.dataset.data , key=slice(_A , offset + self.batch_size ) , indices=self.dataset._indices , )
__SCREAMING_SNAKE_CASE = batch.to_pandas()
__SCREAMING_SNAKE_CASE = df.to_sql(self.name , self.con , index=_A , **_A )
return num_rows or len(_A )
def _A ( self , _A , **_A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _A , _A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
written += num_rows
return written
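# --- usage sketch (illustrative): these classes back Dataset.from_sql / Dataset.to_sql.
#
# from datasets import Dataset
# ds = Dataset.from_dict({"id": [1, 2], "text": ["a", "b"]})
# ds.to_sql("my_table", "sqlite:///data.db")               # writer path above
# ds2 = Dataset.from_sql("my_table", "sqlite:///data.db")  # reader path above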
| 118 | 1 |
'''simple docstring'''
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    mode = "token-classification"
def __init__(self , A ) -> Any:
"""simple docstring"""
if type(A ) == dict:
_a = Namespace(**A )
_a = import_module('''tasks''' )
try:
_a = getattr(A , hparams.task_type )
_a = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
_a = self.token_classification_task.get_labels(hparams.labels )
_a = CrossEntropyLoss().ignore_index
super().__init__(A , len(self.labels ) , self.mode )
def a__ (self , **A ) -> Dict:
"""simple docstring"""
return self.model(**A )
def a__ (self , A , A ) -> Optional[int]:
"""simple docstring"""
_a = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
_a = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
_a = self(**A )
_a = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def a__ (self ) -> int:
"""simple docstring"""
_a = self.hparams
for mode in ["train", "dev", "test"]:
_a = self._feature_file(A )
if os.path.exists(A ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , A )
_a = torch.load(A )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
_a = self.token_classification_task.read_examples_from_file(args.data_dir , A )
_a = self.token_classification_task.convert_examples_to_features(
A , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=A , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , A )
torch.save(A , A )
def a__ (self , A , A , A = False ) -> DataLoader:
"""simple docstring"""
_a = self._feature_file(A )
logger.info('''Loading features from cached file %s''' , A )
_a = torch.load(A )
_a = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_a = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
_a = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
_a = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
_a = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(A , A , A , A ) , batch_size=A )
def a__ (self , A , A ) -> Union[str, Any]:
"""simple docstring"""
"""Compute validation""" ""
_a = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
_a = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
_a = self(**A )
_a , _a = outputs[:2]
_a = logits.detach().cpu().numpy()
_a = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def a__ (self , A ) -> Optional[int]:
"""simple docstring"""
_a = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
_a = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
_a = np.argmax(A , axis=2 )
_a = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
_a = dict(enumerate(self.labels ) )
_a = [[] for _ in range(out_label_ids.shape[0] )]
_a = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
_a = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(A , A ),
'''precision''': precision_score(A , A ),
'''recall''': recall_score(A , A ),
            '''f1''': f1_score(A , A ),
}
_a = dict(results.items() )
_a = results
return ret, preds_list, out_label_list
def a__ (self , A ) -> List[Any]:
"""simple docstring"""
_a , _a , _a = self._eval_end(A )
_a = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def a__ (self , A ) -> Optional[Any]:
"""simple docstring"""
_a , _a , _a = self._eval_end(A )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
_a = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def a__ (A , A ) -> Dict:
"""simple docstring"""
BaseTransformer.add_model_specific_args(A , A )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=A , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=A , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=A , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
lowercase_ = NERTransformer.add_model_specific_args(parser, os.getcwd())
lowercase_ = parser.parse_args()
lowercase_ = NERTransformer(args)
lowercase_ = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
lowercase_ = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
lowercase_ = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
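# --- usage sketch (illustrative; flags follow the repo's lightning_base generic args,
# paths are placeholders):
#
#   python run_ner.py --data_dir ./data/conll2003 --labels ./data/conll2003/labels.txt \
#       --model_name_or_path bert-base-cased --output_dir ./ner_out --max_seq_length 128 \
#       --do_train --do_predict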
| 211 |
'''simple docstring'''
from math import sqrt
def is_prime (number):
    """simple docstring"""
    assert isinstance(number , int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2 , int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status , bool), "'status' must been from type bool"
    return status
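# Usage sketch for the trial-division test above:
#     [n for n in range(2, 20) if is_prime(n)]  ->  [2, 3, 5, 7, 11, 13, 17, 19]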
def sieve_er (n):
    """simple docstring"""
    assert isinstance(n , int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2 , n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1 , len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans , list), "'ans' must been from type list"
    return ans
def get_prime_numbers (n):
    """simple docstring"""
    assert isinstance(n , int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2 , n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans , list), "'ans' must been from type list"
    return ans
def prime_factorization (number):
    """simple docstring"""
    assert isinstance(number , int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient //= factor  # integer division keeps the quotient an int
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans , list), "'ans' must been from type list"
    return ans
def greatest_prime_factor (number):
    """simple docstring"""
    assert isinstance(number , int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans , int), "'ans' must been from type int"
    return ans
def smallest_prime_factor (number):
    """simple docstring"""
    assert isinstance(number , int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans , int), "'ans' must been from type int"
    return ans
def is_even (number):
    """simple docstring"""
    assert isinstance(number , int), "'number' must been an int"
    assert isinstance(number % 2 == 0 , bool), "compare must been from type bool"
    return number % 2 == 0
def is_odd (number):
    """simple docstring"""
    assert isinstance(number , int), "'number' must been an int"
    assert isinstance(number % 2 != 0 , bool), "compare must been from type bool"
    return number % 2 != 0
def goldbach (number):
    """simple docstring"""
    assert (
        isinstance(number , int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans , list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"
    return ans
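# Usage sketch: the first prime pair found for an even input > 2, e.g.
#     goldbach(28)  ->  [5, 23]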
def gcd (numbera , numberb):
    """simple docstring"""
    assert (
        isinstance(numbera , int)
        and isinstance(numberb , int)
        and (numbera >= 0)
        and (numberb >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    # euclidean algorithm: repeatedly replace the pair with (b, a mod b)
    while numberb != 0:
        rest = numbera % numberb
        numbera = numberb
        numberb = rest
    # precondition
    assert isinstance(numbera , int) and (
        numbera >= 0
    ), "'number' must been from type int and positive"
    return numbera
def kg_v (numbera , numberb):
    """simple docstring"""
    assert (
        isinstance(numbera , int)
        and isinstance(numberb , int)
        and (numbera >= 1)
        and (numberb >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if numbera > 1 and numberb > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(numbera)
        prime_fac_b = prime_factorization(numberb)
    elif numbera == 1 or numberb == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(numbera , numberb)
    count_a = 0
    count_b = 0
    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                count_a = prime_fac_a.count(n)
                count_b = prime_fac_b.count(n)
                for _ in range(max(count_a , count_b)):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n)
                for _ in range(count_a):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n)
            for _ in range(count_b):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans , int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
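# Consistency sketch: for positive integers lcm(a, b) * gcd(a, b) == a * b,
# e.g. kg_v(12, 18) -> 36 and 36 * gcd(12, 18) == 12 * 18.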
def get_prime (n):
    """simple docstring"""
    assert isinstance(n , int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans , int) and is_prime(
        ans), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between (p_number_a , p_number_b):
    """simple docstring"""
    assert (
        is_prime(p_number_a) and is_prime(p_number_b) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_b:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans , list)
        and ans[0] != p_number_a
        and ans[len(ans) - 1] != p_number_b
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors (n):
    """simple docstring"""
    assert isinstance(n , int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1 , n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number (number):
    """simple docstring"""
    assert isinstance(number , int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors , list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction (numerator , denominator):
    """simple docstring"""
    assert (
        isinstance(numerator , int)
        and isinstance(denominator , int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator) , abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction , int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial (n):
    """simple docstring"""
    assert isinstance(n , int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1 , n + 1):
        ans *= factor
    return ans
def fib (n):
    """simple docstring"""
    assert isinstance(n , int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fiba = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fiba
        fiba = tmp
    return ans
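# Usage sketch: this iteration yields 1, 1, 2, 3, 5, 8, ... so
#     [fib(i) for i in range(6)]  ->  [1, 1, 2, 3, 5, 8]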
| 211 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _create_iam_role_for_sagemaker( role_name ):
    iam_client = botoa.client('''iam''' )
    sagemaker_trust_policy = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{'''Effect''': '''Allow''', '''Principal''': {'''Service''': '''sagemaker.amazonaws.com'''}, '''Action''': '''sts:AssumeRole'''}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2 ) )
        policy_document = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{
'''Effect''': '''Allow''',
'''Action''': [
'''sagemaker:*''',
'''ecr:GetDownloadUrlForLayer''',
'''ecr:BatchGetImage''',
'''ecr:BatchCheckLayerAvailability''',
'''ecr:GetAuthorizationToken''',
'''cloudwatch:PutMetricData''',
'''cloudwatch:GetMetricData''',
'''cloudwatch:GetMetricStatistics''',
'''cloudwatch:ListMetrics''',
'''logs:CreateLogGroup''',
'''logs:CreateLogStream''',
'''logs:DescribeLogStreams''',
'''logs:PutLogEvents''',
'''logs:GetLogEvents''',
'''s3:CreateBucket''',
'''s3:ListBucket''',
'''s3:GetBucketLocation''',
'''s3:GetObject''',
'''s3:PutObject''',
],
'''Resource''': '''*''',
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=F'''{role_name}_policy_permission''', PolicyDocument=json.dumps(policy_document, indent=2 ), )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
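# Note on the pattern above: role creation is made idempotent by catching the
# service's EntityAlreadyExists error instead of checking existence first, which
# avoids a race between the check and the create call. The same shape works for
# any botoa client (sketch, names illustrative):
#
#     try:
#         client.create_role(RoleName=name, AssumeRolePolicyDocument=doc)
#     except client.exceptions.EntityAlreadyExistsException:
#         pass  # safe to reuse the existing role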
def _get_iam_role_arn( role_name ):
    iam_client = botoa.client('''iam''' )
    return iam_client.get_role(RoleName=role_name )["Role"]["Arn"]
def A__ ( ):
SCREAMING_SNAKE_CASE_ = _ask_options(
'''How do you want to authorize?''', ['''AWS Profile''', '''Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '''], __lowerCamelCase, )
SCREAMING_SNAKE_CASE_ = None
if credentials_configuration == 0:
SCREAMING_SNAKE_CASE_ = _ask_field('''Enter your AWS Profile name: [default] ''', default='''default''' )
SCREAMING_SNAKE_CASE_ = aws_profile
else:
print(
'''Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,'''
'''`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`''' )
SCREAMING_SNAKE_CASE_ = _ask_field('''AWS Access Key ID: ''' )
SCREAMING_SNAKE_CASE_ = aws_access_key_id
SCREAMING_SNAKE_CASE_ = _ask_field('''AWS Secret Access Key: ''' )
SCREAMING_SNAKE_CASE_ = aws_secret_access_key
SCREAMING_SNAKE_CASE_ = _ask_field('''Enter your AWS Region: [us-east-1]''', default='''us-east-1''' )
SCREAMING_SNAKE_CASE_ = aws_region
SCREAMING_SNAKE_CASE_ = _ask_options(
'''Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?''', ['''Provide IAM Role name''', '''Create new IAM role using credentials'''], __lowerCamelCase, )
if role_management == 0:
SCREAMING_SNAKE_CASE_ = _ask_field('''Enter your IAM role name: ''' )
else:
SCREAMING_SNAKE_CASE_ = '''accelerate_sagemaker_execution_role'''
print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = _ask_field(
'''Do you want to use custom Docker image? [yes/NO]: ''', _convert_yes_no_to_bool, default=__lowerCamelCase, error_message='''Please enter yes or no.''', )
SCREAMING_SNAKE_CASE_ = None
if is_custom_docker_image:
SCREAMING_SNAKE_CASE_ = _ask_field('''Enter your Docker image: ''', lambda __lowerCamelCase : str(__lowerCamelCase ).lower() )
SCREAMING_SNAKE_CASE_ = _ask_field(
'''Do you want to provide SageMaker input channels with data locations? [yes/NO]: ''', _convert_yes_no_to_bool, default=__lowerCamelCase, error_message='''Please enter yes or no.''', )
SCREAMING_SNAKE_CASE_ = None
if is_sagemaker_inputs_enabled:
SCREAMING_SNAKE_CASE_ = _ask_field(
'''Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ''', lambda __lowerCamelCase : str(__lowerCamelCase ).lower(), )
SCREAMING_SNAKE_CASE_ = _ask_field(
'''Do you want to enable SageMaker metrics? [yes/NO]: ''', _convert_yes_no_to_bool, default=__lowerCamelCase, error_message='''Please enter yes or no.''', )
SCREAMING_SNAKE_CASE_ = None
if is_sagemaker_metrics_enabled:
SCREAMING_SNAKE_CASE_ = _ask_field(
'''Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ''', lambda __lowerCamelCase : str(__lowerCamelCase ).lower(), )
SCREAMING_SNAKE_CASE_ = _ask_options(
'''What is the distributed mode?''', ['''No distributed training''', '''Data parallelism'''], _convert_sagemaker_distributed_mode, )
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = _ask_field(
'''Do you wish to optimize your script with torch dynamo?[yes/NO]:''', _convert_yes_no_to_bool, default=__lowerCamelCase, error_message='''Please enter yes or no.''', )
if use_dynamo:
SCREAMING_SNAKE_CASE_ = '''dynamo_'''
SCREAMING_SNAKE_CASE_ = _ask_options(
'''Which dynamo backend would you like to use?''', [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, )
SCREAMING_SNAKE_CASE_ = _ask_field(
'''Do you want to customize the defaults sent to torch.compile? [yes/NO]: ''', _convert_yes_no_to_bool, default=__lowerCamelCase, error_message='''Please enter yes or no.''', )
if use_custom_options:
SCREAMING_SNAKE_CASE_ = _ask_options(
'''Which mode do you want to use?''', __lowerCamelCase, lambda __lowerCamelCase : TORCH_DYNAMO_MODES[int(__lowerCamelCase )], default='''default''', )
SCREAMING_SNAKE_CASE_ = _ask_field(
'''Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ''', _convert_yes_no_to_bool, default=__lowerCamelCase, error_message='''Please enter yes or no.''', )
SCREAMING_SNAKE_CASE_ = _ask_field(
'''Do you want to enable dynamic shape tracing? [yes/NO]: ''', _convert_yes_no_to_bool, default=__lowerCamelCase, error_message='''Please enter yes or no.''', )
SCREAMING_SNAKE_CASE_ = '''Which EC2 instance type you want to use for your training?'''
if distributed_type != SageMakerDistributedType.NO:
SCREAMING_SNAKE_CASE_ = _ask_options(
__lowerCamelCase, __lowerCamelCase, lambda __lowerCamelCase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(__lowerCamelCase )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
SCREAMING_SNAKE_CASE_ = _ask_field(__lowerCamelCase, lambda __lowerCamelCase : str(__lowerCamelCase ).lower(), default='''ml.p3.2xlarge''' )
SCREAMING_SNAKE_CASE_ = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
SCREAMING_SNAKE_CASE_ = _ask_field(
'''How many machines do you want use? [1]: ''', __lowerCamelCase, default=1, )
SCREAMING_SNAKE_CASE_ = _ask_options(
'''Do you wish to use FP16 or BF16 (mixed precision)?''', ['''no''', '''fp16''', '''bf16''', '''fp8'''], _convert_mixed_precision, )
if use_dynamo and mixed_precision == "no":
print(
'''Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.''' )
return SageMakerConfig(
image_uri=__lowerCamelCase, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=__lowerCamelCase, use_cpu=__lowerCamelCase, dynamo_config=__lowerCamelCase, eca_instance_type=__lowerCamelCase, profile=__lowerCamelCase, region=__lowerCamelCase, iam_role_name=__lowerCamelCase, mixed_precision=__lowerCamelCase, num_machines=__lowerCamelCase, sagemaker_inputs_file=__lowerCamelCase, sagemaker_metrics_file=__lowerCamelCase, )
| 257 |
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["transformers", "torch", "note_seq"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
    @classmethod
    def from_config ( cls , *args , **kwargs ):
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
    @classmethod
    def from_pretrained ( cls , *args , **kwargs ):
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 257 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text ( text , n=100 , character=" " ) -> List[str]:
    """simple docstring"""
    text = text.split(character )
    return [character.join(text[i : i + n] ).strip() for i in range(0 , len(text ) , n )]
def split_documents ( documents ) -> dict:
    """simple docstring"""
    titles , texts = [], []
    for title, text in zip(documents['title'] , documents['text'] ):
        if text is not None:
            for passage in split_text(text ):
                titles.append(title if title is not None else '' )
                texts.append(passage )
    return {"title": titles, "text": texts}
def embed ( documents , ctx_encoder , ctx_tokenizer ) -> dict:
    """simple docstring"""
    input_ids = ctx_tokenizer(
        documents['title'] , documents['text'] , truncation=True , padding='longest' , return_tensors='pt' )['input_ids']
    embeddings = ctx_encoder(input_ids.to(device=device ) , return_dict=True ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main ( rag_example_args , processing_args , index_hnsw_args , ) -> None:
    """simple docstring"""
    ######################################
    logger.info('Step 1 - Create the dataset' )
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
    # You can load a Dataset object this way
    dataset = load_dataset(
        'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc )
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=device )
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    new_features = Features(
        {'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer ) , batched=True , batch_size=processing_args.batch_size , features=new_features , )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
    dataset.save_to_disk(passages_path )
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset
    ######################################
    logger.info('Step 2 - Index the dataset' )
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index('embeddings' , custom_index=index )
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
    dataset.get_index('embeddings' ).save(index_path )
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
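# Hedged sketch of querying the saved index (variable names illustrative;
# `get_nearest_examples` is the datasets counterpart of `add_faiss_index`):
#
#     question_embedding = ...  # vector from the matching DPR *question* encoder
#     scores, retrieved = dataset.get_nearest_examples("embeddings", question_embedding, k=5)
#     # retrieved["title"] / retrieved["text"] hold the top-5 passages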
@dataclass
class RagExampleArguments:
    """simple docstring"""
    csv_path: str = field(
        default=str(Path(__file__ ).parent / """test_run""" / """dummy-kb""" / """my_knowledge_dataset.csv""" ) , metadata={"""help""": """Path to a tab-separated csv file with columns 'title' and 'text'"""} , )
    question: Optional[str] = field(
        default=None , metadata={"""help""": """Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."""} , )
    rag_model_name: str = field(
        default="""facebook/rag-sequence-nq""" , metadata={"""help""": """The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"""} , )
    dpr_ctx_encoder_model_name: str = field(
        default="""facebook/dpr-ctx_encoder-multiset-base""" , metadata={
            """help""": (
                """The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"""
                """ 'facebook/dpr-ctx_encoder-multiset-base'"""
            )
        } , )
    output_dir: Optional[str] = field(
        default=str(Path(__file__ ).parent / """test_run""" / """dummy-kb""" ) , metadata={"""help""": """Path to a directory where the dataset passages and the index will be saved"""} , )
@dataclass
class ProcessingArguments:
    """simple docstring"""
    num_proc: Optional[int] = field(
        default=None , metadata={
            """help""": """The number of processes to use to split the documents into passages. Default is single process."""
        } , )
    batch_size: int = field(
        default=16 , metadata={
            """help""": """The batch size to use when computing the passages embeddings using the DPR context encoder."""
        } , )
@dataclass
class IndexHnswArguments:
    """simple docstring"""
    d: int = field(
        default=768 , metadata={"""help""": """The dimension of the embeddings to pass to the HNSW Faiss index."""} , )
    m: int = field(
        default=128 , metadata={
            """help""": (
                """The number of bi-directional links created for every new element during the HNSW index construction."""
            )
        } , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 28 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma (num : float ):
    if num <= 0:
        raise ValueError('''math domain error''' )
    # integrate x^(z - 1) * e^(-x) from 0 to infinity
    return quad(integrand , 0 , inf , args=(num) )[0]
def integrand (x : float , z : float ):
    return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
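# Usage sketch: Gamma(n) == (n - 1)! for positive integers, so
#     gamma(5)  # -> ~24.0 (up to quadrature error)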
| 263 | 0 |
'''simple docstring'''
from __future__ import annotations
ELECTRON_CHARGE = 1.6021E-19  # units = C
def electric_conductivity ( conductivity , electron_conc , mobility , ) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
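# Usage sketch: pass exactly one unknown as 0 and the helper solves for it, e.g.
#     electric_conductivity(conductivity=25, electron_conc=0, mobility=120)
#     # -> ("electron_conc", 25 / (120 * ELECTRON_CHARGE))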
| 46 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs ( graph ) -> bool:
    visited = [False] * len(graph )
    color = [-1] * len(graph )
    def dfs (v , c ):
        # 2-color the component reachable from v, alternating colors per level
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )
    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )
    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 46 | 1 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
__A = "base_with_context"
def load_notes_encoder ( weights , model ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Tuple =nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
lowerCamelCase__: Union[str, Any] =nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=UpperCamelCase__ )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCamelCase__: Optional[Any] =weights[F"""layers_{lyr_num}"""]
lowerCamelCase__: Union[str, Any] =nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
lowerCamelCase__: List[Any] =ly_weight["attention"]
lowerCamelCase__: Any =nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase__: List[str] =nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase__: int =nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase__: str =nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase__: Optional[Any] =nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowerCamelCase__: List[Any] =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowerCamelCase__: Union[str, Any] =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowerCamelCase__: List[Any] =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowerCamelCase__: Any =nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_continuous_encoder ( weights , model ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__: List[Any] =nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
lowerCamelCase__: List[Any] =nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=UpperCamelCase__ )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCamelCase__: Union[str, Any] =weights[F"""layers_{lyr_num}"""]
lowerCamelCase__: List[Any] =ly_weight["attention"]
lowerCamelCase__: Dict =nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase__: Tuple =nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase__: List[str] =nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase__: Dict =nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase__: Any =nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
lowerCamelCase__: List[str] =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowerCamelCase__: Dict =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowerCamelCase__: str =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowerCamelCase__: Tuple =nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowerCamelCase__: int =nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder ( weights , model ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__: Dict =nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
lowerCamelCase__: int =nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
lowerCamelCase__: Optional[int] =nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=UpperCamelCase__ )
lowerCamelCase__: str =nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
lowerCamelCase__: List[Any] =weights[F"""layers_{lyr_num}"""]
lowerCamelCase__: Union[str, Any] =nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
lowerCamelCase__: List[Any] =nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
lowerCamelCase__: int =ly_weight["self_attention"]
lowerCamelCase__: List[str] =nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase__: Union[str, Any] =nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase__: Dict =nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase__: int =nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase__: List[Any] =ly_weight["MultiHeadDotProductAttention_0"]
lowerCamelCase__: str =nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase__: int =nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase__: List[str] =nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase__: List[str] =nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase__: List[Any] =nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
lowerCamelCase__: Any =nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowerCamelCase__: Any =nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
lowerCamelCase__: Tuple =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowerCamelCase__: Optional[int] =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowerCamelCase__: Tuple =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowerCamelCase__: str =nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
lowerCamelCase__: Optional[int] =nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main ( args ):
"""simple docstring"""
lowerCamelCase__: List[str] =checkpoints.load_tax_checkpoint(args.checkpoint_path )
lowerCamelCase__: Optional[Any] =jnp.tree_util.tree_map(onp.array , UpperCamelCase__ )
lowerCamelCase__: Union[str, Any] =[
"from __gin__ import dynamic_registration",
"from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
"diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
"diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
]
lowerCamelCase__: Any =os.path.join(args.checkpoint_path , ".." , "config.gin" )
lowerCamelCase__: Optional[int] =inference.parse_training_gin_file(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__: List[str] =inference.InferenceModel(args.checkpoint_path , UpperCamelCase__ )
lowerCamelCase__: Union[str, Any] =DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" )
lowerCamelCase__: Dict =SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
lowerCamelCase__: Dict =SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
lowerCamelCase__: List[str] =TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
lowerCamelCase__: List[Any] =load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , UpperCamelCase__ )
lowerCamelCase__: List[str] =load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , UpperCamelCase__ )
lowerCamelCase__: int =load_decoder(ta_checkpoint["target"]["decoder"] , UpperCamelCase__ )
lowerCamelCase__: Any =OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
lowerCamelCase__: Union[str, Any] =SpectrogramDiffusionPipeline(
notes_encoder=UpperCamelCase__ , continuous_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ , scheduler=UpperCamelCase__ , melgan=UpperCamelCase__ , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f'{MODEL}/checkpoint_500000',
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
    args = parser.parse_args()
main(args)
| 10 | '''simple docstring'''
def solution ( limit = 1_00_00_00 ) -> int:
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'{solution() = }')
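# Worked check on a tiny limit: phi(2..8) = 1, 2, 2, 4, 2, 6, 4, which sums to 21,
# so solution(8) -> 21.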
| 67 | 0 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = """src/diffusers"""
REPO_PATH = """."""
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    """diffusers""",
    os.path.join(DIFFUSERS_PATH, """__init__.py"""),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue ( line :str , indent :str ) -> bool:
    '''simple docstring'''
    return line.startswith(indent ) or len(line ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , line ) is not None
def find_code_in_diffusers ( object_name :str ) -> str:
    '''simple docstring'''
    parts = object_name.split(""".""" )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , f'{module}.py' ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module , parts[i] )
    if i >= len(parts ):
        raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
    with open(os.path.join(DIFFUSERS_PATH , f'{module}.py' ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = """"""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(Rf'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(f' {object_name} does not match any function or class in {module}.' )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] , indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines )
_re_copy_warning = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
_re_replace_pattern = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""")
_re_fill_pattern = re.compile(R"""<FILL\s+[^>]*>""")
def get_indent ( code :str ) -> str:
    '''simple docstring'''
    lines = code.split("""\n""" )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(R"""^(\s*)\S""" , lines[idx] ).groups()[0]
    return ""
def blackify ( code :str ) -> str:
    '''simple docstring'''
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = f'class Bla:\n{code}'
    mode = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 , preview=True )
    result = black.format_str(code , mode=mode )
    result , _ = style_docstrings_in_code(result )
    return result[len("""class Bla:\n""" ) :] if has_indent else result
def is_copy_consistent ( filename :str , overwrite :bool=False ) -> list:
    '''simple docstring'''
    with open(filename , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines ):
        search = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent , object_name , replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name )
        theoretical_indent = get_indent(theoretical_code )
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines ) and should_continue:
            line_index += 1
            if line_index >= len(lines ):
                break
            line = lines[line_index]
            should_continue = _should_continue(line , indent ) and re.search(f'^{indent}# End copy' , line ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = """""".join(observed_code_lines )
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code_lines = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(line ) is None]
        theoretical_code = """\n""".join(theoretical_code_lines )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern ) > 0:
            patterns = replace_pattern.replace("""with""" , """""" ).split(""",""" )
            patterns = [_re_replace_pattern.search(p ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obja , objb , option = pattern.groups()
                theoretical_code = re.sub(obja , objb , theoretical_code )
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obja.lower() , objb.lower() , theoretical_code )
                    theoretical_code = re.sub(obja.upper() , objb.upper() , theoretical_code )
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code )
            theoretical_code = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs ) > 0:
        # Warn the user a file has been modified.
        print(f'Detected changes, rewriting {filename}.' )
        with open(filename , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
            f.writelines(lines )
    return diffs
def check_copies ( overwrite :bool = False ) -> None:
    '''simple docstring'''
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH , """**/*.py""" ) , recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename , overwrite )
        diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        diff = """\n""".join(diffs )
        raise Exception(
            """Found the following copy inconsistencies:\n"""
            + diff
            + """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
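# For reference, the marker this checker consumes looks like the comment below; an
# optional `with OldName->NewName` suffix drives the regex replacement applied
# before comparison (illustrative example, not tied to a specific file):
#
#     # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock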
| 32 | """simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError("""only integers accepted as input""" )
else:
lowercase = str(abs(lowerCAmelCase__ ) )
lowercase = [list(lowerCAmelCase__ ) for char in range(len(lowerCAmelCase__ ) )]
for index in range(len(lowerCAmelCase__ ) ):
num_transpositions[index].pop(lowerCAmelCase__ )
return max(
int("""""".join(list(lowerCAmelCase__ ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
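# Usage sketch: each candidate drops one digit, so for 269 the candidates are
# 69, 29 and 26, and remove_digit(269) -> 69.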
| 32 | 1 |
'''simple docstring'''
import random
def random_graph ( nodes_number , probability , directed = False ) -> dict:
    '''simple docstring'''
    graph = {i: [] for i in range(nodes_number )}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(nodes_number )
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than 'probability'
    for i in range(nodes_number ):
        for j in range(i + 1 , nodes_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
                    graph[j].append(i )
    return graph
def complete_graph ( vertices_number ) -> dict:
    '''simple docstring'''
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig ( XLMRobertaConfig ):
    '''simple docstring'''
    model_type = "M-CLIP"
    def __init__( self , transformerDimSize=1024 , imageDimSize=768 , **kwargs ):
        """simple docstring"""
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )
class MultilingualCLIP ( PreTrainedModel ):
    '''simple docstring'''
    config_class = MCLIPConfig
    def __init__( self , config , *args , **kwargs ):
        """simple docstring"""
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )
    def forward ( self , input_ids , attention_mask ):
        """simple docstring"""
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
        # masked mean pooling over the sequence dimension
        embs_pooled = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(embs_pooled ), embs
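# Self-contained sketch of the masked mean pooling above (toy tensors):
#     embs = torch.ones(1, 4, 2)              # (batch, seq_len, hidden)
#     mask = torch.tensor([[1, 1, 0, 0]])     # two real tokens, two pads
#     pooled = (embs * mask.unsqueeze(2)).sum(dim=1) / mask.sum(dim=1)[:, None]
#     # pooled has shape (1, 2) and every entry equals 1.0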
| 283 | 0 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def UpperCamelCase ( _lowerCamelCase : Optional[int] ):
return EnvironmentCommand()
class UpperCAmelCase ( UpperCamelCase__ ):
@staticmethod
    def register_subcommand ( parser :ArgumentParser ) -> None:
        download_parser = parser.add_parser("env" )
        download_parser.set_defaults(func=UpperCamelCase )
    def run ( self ) -> dict:
        hub_version = huggingface_hub.__version__
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = "not installed"
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = "not installed"
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": F"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
        print(self.format_dict(info ) )
return info
@staticmethod
    def format_dict ( d ) -> str:
        return "\n".join([F"- {prop}: {val}" for prop, val in d.items()] ) + "\n"
| 123 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase :
def __init__( self :str , lowercase_ :str , )-> str:
A__ = parent
A__ = 13
A__ = 7
A__ = True
A__ = True
A__ = False
A__ = True
A__ = 99
A__ = 32
A__ = 2
A__ = 4
A__ = 37
A__ = "gelu"
A__ = 0.1
A__ = 0.1
A__ = 5_12
A__ = 16
A__ = 2
A__ = 0.0_2
A__ = 3
A__ = 4
A__ = None
def UpperCAmelCase_ ( self :Union[str, Any] )-> int:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self :str , lowercase_ :Optional[int] , lowercase_ :List[str] , lowercase_ :Any , lowercase_ :Union[str, Any] , lowercase_ :Optional[int] , lowercase_ :str )-> List[str]:
A__ = TFDistilBertModel(config=lowercase_ )
A__ = {"input_ids": input_ids, "attention_mask": input_mask}
A__ = model(lowercase_ )
A__ = [input_ids, input_mask]
A__ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self :List[str] , lowercase_ :str , lowercase_ :Optional[Any] , lowercase_ :Optional[int] , lowercase_ :Optional[int] , lowercase_ :Optional[int] , lowercase_ :Union[str, Any] )-> Optional[int]:
A__ = TFDistilBertForMaskedLM(config=lowercase_ )
A__ = {"input_ids": input_ids, "attention_mask": input_mask}
A__ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self :Any , lowercase_ :str , lowercase_ :str , lowercase_ :Optional[int] , lowercase_ :str , lowercase_ :List[Any] , lowercase_ :Union[str, Any] )-> Optional[int]:
A__ = TFDistilBertForQuestionAnswering(config=lowercase_ )
A__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
}
A__ = model(lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self :Union[str, Any] , lowercase_ :Optional[int] , lowercase_ :Any , lowercase_ :Dict , lowercase_ :Tuple , lowercase_ :Optional[Any] , lowercase_ :Optional[int] )-> Any:
A__ = self.num_labels
A__ = TFDistilBertForSequenceClassification(lowercase_ )
A__ = {"input_ids": input_ids, "attention_mask": input_mask}
A__ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self :str , lowercase_ :Optional[Any] , lowercase_ :List[Any] , lowercase_ :Dict , lowercase_ :Tuple , lowercase_ :int , lowercase_ :Union[str, Any] )-> str:
A__ = self.num_choices
A__ = TFDistilBertForMultipleChoice(lowercase_ )
A__ = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
A__ = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
A__ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
}
A__ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self :str , lowercase_ :Any , lowercase_ :List[str] , lowercase_ :Any , lowercase_ :int , lowercase_ :List[Any] , lowercase_ :Tuple )-> Tuple:
A__ = self.num_labels
A__ = TFDistilBertForTokenClassification(lowercase_ )
A__ = {"input_ids": input_ids, "attention_mask": input_mask}
A__ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self :Any )-> Union[str, Any]:
A__ = self.prepare_config_and_inputs()
((A__), (A__), (A__), (A__), (A__), (A__)) = config_and_inputs
A__ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
__lowercase = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
__lowercase = (
{
"""feature-extraction""": TFDistilBertModel,
"""fill-mask""": TFDistilBertForMaskedLM,
"""question-answering""": TFDistilBertForQuestionAnswering,
"""text-classification""": TFDistilBertForSequenceClassification,
"""token-classification""": TFDistilBertForTokenClassification,
"""zero-shot""": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__lowercase = False
__lowercase = False
def UpperCAmelCase_ ( self :Optional[Any] )-> List[Any]:
A__ = TFDistilBertModelTester(self )
A__ = ConfigTester(self , config_class=lowercase_ , dim=37 )
def UpperCAmelCase_ ( self :Tuple )-> Tuple:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self :int )-> Tuple:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowercase_ )
def UpperCAmelCase_ ( self :Optional[int] )-> Optional[Any]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase_ )
def UpperCAmelCase_ ( self :str )-> str:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase_ )
def UpperCAmelCase_ ( self :List[str] )-> Dict:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase_ )
def UpperCAmelCase_ ( self :List[str] )-> Optional[int]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase_ )
def UpperCAmelCase_ ( self :str )-> int:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase_ )
@slow
def UpperCAmelCase_ ( self :List[str] )-> Dict:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
A__ = TFDistilBertModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self :List[Any] )-> Any:
A__ = TFDistilBertModel.from_pretrained("distilbert-base-uncased" )
A__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A__ = model(lowercase_ )[0]
A__ = [1, 6, 7_68]
self.assertEqual(output.shape , lowercase_ )
A__ = tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1E-4 )
| 123 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a =logging.get_logger(__name__)
a ={
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", projection_dim: int = 0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
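# Hedged usage sketch (not part of the original file): field names follow the
# __init__ assignments above; projection_dim=0 means no extra projection layer.
# config = DPRConfig(projection_dim=128)
# assert config.hidden_size == 768 and config.projection_dim == 128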
| 73 |
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending `sorted_collection`; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        # estimate the position by linear interpolation between the endpoints
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        if point < left:
            right = left
            left = point
        elif point > right:
            left = right
            right = point
        elif item < current_item:
            right = point - 1
        else:
            left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Pure recursive counterpart; `left` and `right` bound the current range."""
    # avoid division by zero during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
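# Hedged demo of the recursive variant (assumed, not in the original file):
# the full range [0, len - 1] is passed explicitly on the first call.
def _demo_recursive_search():
    data = [10, 30, 40, 45, 50, 66, 77, 93]
    return interpolation_search_by_recursion(data, 45, 0, len(data) - 1)  # -> 3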
if __name__ == "__main__":
    import sys

    # a small hard-coded demo collection (the original guarded this behind a
    # debug flag, which left `collection` undefined when the flag was off)
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at position: {result}")
    else:
        print("Not found")
| 73 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
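# Hedged usage sketch (assumed, not part of the original file):
# tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
# tok.build_inputs_with_special_tokens([7, 8], [9])  # -> [cls, 7, 8, sep, 9, sep]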
| 209 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_speech_to_text"] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_speech_to_text"] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeech2TextForConditionalGeneration,
TFSpeech2TextModel,
TFSpeech2TextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
Speech2TextForConditionalGeneration,
Speech2TextModel,
Speech2TextPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
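# What the lazy pattern above buys (an illustrative note, not from the original
# file): importing the package stays cheap, and `from transformers.models.speech_to_text
# import Speech2TextModel` only triggers the heavy torch import when
# _LazyModule.__getattr__ first resolves that attribute.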
| 209 | 1 |
import os
def solution() -> int:
    """Sum of (1-based rank) * (alphabetical value) over the sorted name list."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")
    names.sort()

    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64  # "A" -> 1, "B" -> 2, ...
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
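# Worked example of the scoring rule (values quoted in the Project Euler 22
# statement): "COLIN" has alphabetical value 3 + 15 + 12 + 9 + 14 = 53 and sits
# at position 938 in the sorted list, contributing 938 * 53 = 49714.
def name_score(name: str, position: int) -> int:
    # position is 1-based; ord("A") - 64 == 1
    return position * sum(ord(letter) - 64 for letter in name)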
if __name__ == "__main__":
print(solution())
| 205 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum over subsets of `nums` with no two adjacent elements.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
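# The recurrence the loop above implements, as a worked trace (illustrative):
#   include[i] = exclude[i - 1] + nums[i]
#   exclude[i] = max(include[i - 1], exclude[i - 1])
# For [1, 2, 3] the (include, exclude) pairs evolve (1, 0) -> (2, 1) -> (4, 2),
# so the answer is max(4, 2) = 4.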
if __name__ == "__main__":
import doctest
doctest.testmod()
| 205 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
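# make_divisible (a sketch of the assumed semantics, mirroring the MobileNet
# helper): rounds a channel count to a multiple of `divisor` without dropping
# more than 10%; e.g. make_divisible(512 * 0.25, divisor=8) == 128.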
class MobileViTVaModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, hidden_act="swish", conv_kernel_size=3, output_stride=32, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, width_multiplier=0.25, ffn_dropout=0.0, attn_dropout=0.0):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MobileViTVaModel,
'''image-classification''': MobileViTVaForImageClassification,
'''image-segmentation''': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False
    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 368 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Return the shape of `tensor`, preferring static dimensions where known."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # the tiny epsilon keeps the op numerically stable under XLA compilation
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)
    # Compute layer normalization using the batch_normalization function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    # HDF5 attribute headers are limited to roughly 64kB; chunk anything larger.
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    """Expand rank-1 tensors in a nested structure to rank 2 (Keras loss convention)."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
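# Minimal usage sketch for the helpers above (assumed, not from the original
# file): shape_list mixes static and dynamic dims, flatten mimics torch.flatten.
# x = tf.zeros((2, 3, 4))
# shape_list(x)            # -> [2, 3, 4]
# flatten(x, 1, 2).shape   # -> TensorShape([2, 12])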
| 21 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=58100, scale_embedding=False, pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
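# Hedged example (not in the original file): Marian ties the pad id and the
# decoder start id to the last vocabulary entry, so with the defaults above
# MarianConfig().pad_token_id == MarianConfig().decoder_start_token_id == 58100.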
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 107 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
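# Why uint16 above (an illustrative note, not from the original script): BERT's
# 30522-token vocab and GPT-2's 50257-token vocab both fit in 2 bytes per id,
# since 2**16 = 65536; only vocabularies at or above that bound need int32,
# roughly halving the size of the pickled dataset in the common case.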
if __name__ == "__main__":
main()
| 303 | 0 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_28,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token, repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''test-dynamic-config''' )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)
        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )
        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})
        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
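    # Sketch of the update_from_string contract exercised above (hedged, based
    # on PretrainedConfig behavior): it splits the string on commas into
    # "key=value" pairs and casts each value to the type of the existing
    # attribute, so "scale_attn_weights=false" becomes a bool, "n_embd=769" an int.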
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")
        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This checks that configs can still be loaded from a raw URL
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_trocr"] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
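
# Note (sketch, an assumption about intended usage): with the lazy module installed in
# sys.modules, importing this package is cheap; heavy symbols are only materialized on
# first attribute access, e.g.
#   from transformers.models.trocr import TrOCRProcessor  # triggers the real import here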
| 6 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
        vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"prompt": ".",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 1.0,
"sag_scale": 1.0,
"output_type": "numpy",
}
return inputs
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = "."
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = "."
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = "."
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , width=768 , height=512 , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" , )
        image = output.images
assert image.shape == (1, 512, 768, 3)
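
# Usage sketch (assumption: a diffusers build with SAG support; values below are illustrative):
# pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
# image = pipe("a photo of an astronaut", sag_scale=0.75).images[0]  # sag_scale=0.0 disables SAG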
| 261 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 261 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 28 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 28 | 1 |
'''simple docstring'''
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """simple docstring"""
    placement = 1
    max_digit = max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX ):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
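    # Example run (assumption: non-negative integers; the sort is in place and returns the list):
    print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))  # [2, 24, 45, 66, 75, 90, 170, 802]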
| 258 |
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    '''simple docstring'''
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
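    # Examples (assumption: abbr(a, b) asks whether `a` can be turned into `b` by uppercasing
    # some lowercase letters and deleting the remaining lowercase ones):
    print(abbr("daBcd", "ABC"))  # True
    print(abbr("dBcd", "ABC"))  # False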
| 260 | 0 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars ) for _ in range(length ) )
def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder )
        + random(digits, quotient )
        + random(punctuation, quotient )
    )
    list_of_chars = list(chars )
    shuffle(list_of_chars )
    return "".join(list_of_chars )
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl ) for _ in range(i ) )


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password ) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password )
    lower = any(char in ascii_lowercase for char in password )
    num = any(char in digits for char in password )
    spec_char = any(char in punctuation for char in password )
    return upper and lower and num and spec_char


# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def main() -> None:
    length = int(input("Please indicate the max length of your password: " ).strip() )
    chars_incl = input(
        "Please indicate the characters that must be in your password: " ).strip()
    print("Password generated:" , password_generator(length ) )
    print(
        "Alternative Password generated:" , alternative_password_generator(chars_incl , length ) , )
    print("[If you are thinking of using this password, you had better save it.]" )
if __name__ == "__main__":
main()
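    # Quick self-check (assumption: the generator and validator defined above):
    pw = password_generator(12)
    print(pw, "strong" if is_strong_password(pw) else "weak")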
| 113 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 113 | 1 |
def solution(length: int = 50) -> int:
    '''simple docstring'''
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 252 |
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    '''simple docstring'''
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b , a % b )
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    '''simple docstring'''
    (x, y) = extended_euclid(n1 , n2 )
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    '''simple docstring'''
    (b, x) = extended_euclid(a , n )
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    '''simple docstring'''
    x, y = invert_modulo(n1 , n2 ), invert_modulo(n2 , n1 )
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 252 | 1 |
def binary_insertion_sort(collection: list) -> list:
    '''simple docstring'''
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
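

# Quick self-check (assumption: ascending, in-place sort):
assert binary_insertion_sort([5, 2, 9, 1, 5]) == [1, 2, 5, 5, 9]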
if __name__ == "__main__":
__A = input('''Enter numbers separated by a comma:\n''').strip()
__A = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
| 356 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """simple docstring"""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , **kwargs , )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df , features=features , cache_dir=cache_dir , working_dir=working_dir , **kwargs , )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
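

# Usage sketch (assumption: a live SparkSession and the `SparkDatasetReader` defined above):
# from pyspark.sql import SparkSession
# spark = SparkSession.builder.getOrCreate()
# df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
# ds = SparkDatasetReader(df, streaming=False).read()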
| 277 | 0 |
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    if len(array ) < k or k < 0:
        raise ValueError("""Invalid Input""" )
    max_sum = current_sum = sum(array[:k] )
    for i in range(len(array ) - k ):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum , current_sum )
    return max_sum
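

# Worked example (assumption: classic fixed-window input):
assert max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4) == 24  # window 3 + 1 + 0 + 20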
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-10_00, 10_00) for i in range(1_00)]
    k = randint(0, 1_10)
print(f"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""") | 163 |
'''simple docstring'''
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed , n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i , r_idx - l_idx))
        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden , weight , bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden , proj.t().contiguous())
            logit = nn.functional.linear(proj_hid , weight , bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1 , hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("""Input and labels should have the same size in the batch dimension.""")
        else:
            hidden = hidden.view(-1 , hidden.size(-1))
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit , dim=-1)[mask].gather(1 , labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit , dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj)
            head_logprob = nn.functional.log_softmax(head_logit , dim=1)
            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device)
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0 , indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0 , indices_i)
                    hidden_i = hidden.index_select(0 , indices_i)
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1 , target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i , weight_i , bias_i , proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None]).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self , """keep_order""") and self.keep_order) or keep_order:
                        out.index_copy_(0 , indices_i , -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out
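
    # Usage sketch (assumption: toy sizes; `cutoffs` splits a 1000-token vocab into a head and two tail clusters):
    # crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64, cutoffs=[100, 500], div_val=1)
    # nll = crit(torch.randn(2, 8, 64), labels=torch.randint(0, 1000, (2, 8)))  # flattened per-token NLLs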
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
            return nn.functional.log_softmax(logit , dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit , dim=1)
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden , weight_i , bias_i , proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out | 163 | 1 |
"""simple docstring"""
def matching_min_vertex_cover(graph: dict) -> set:
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph )
    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add its extremities to chosen_vertices, and then
    # remove all arcs adjacent to from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices
def get_edges(graph: dict) -> set:
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 356 | """simple docstring"""
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception('encode() accepts only letters of the alphabet and spaces' )
    return encoded
def decode(coded: str) -> str:
    if set(coded ) - {"A", "B", " "} != set():
        raise Exception('decode() accepts only \'A\', \'B\' and spaces' )
    decoded = ""
    for word in coded.split():
        while len(word ) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
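    # Round-trip example (assumption: the Baconian-style mapping defined above):
    print(encode("hello"))  # AABBBAABAAABABAABABAABBAB
    print(decode(encode("hello")))  # hello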
| 321 | 0 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """simple docstring"""

    def decorator(func):
        handle = getattr(func , """handle_key""" , [] )
        handle += [key]
        setattr(func , """handle_key""" , handle )
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """simple docstring"""

    def decorator(func):
        handle = getattr(func , """handle_key""" , [] )
        handle += keys
        setattr(func , """handle_key""" , handle )
        return func

    return decorator
class KeyHandler(type):
    '''simple docstring'''

    def __new__(cls, name, bases, attrs):
        '''simple docstring'''
        new_cls = super().__new__(cls, name, bases, attrs )
        if not hasattr(new_cls, """key_handler""" ):
            setattr(new_cls, """key_handler""", {} )
        setattr(new_cls, """handle_input""", KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value, """handle_key""", [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        '''simple docstring'''
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None


def register(cls):
    """simple docstring"""
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
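

# Usage sketch (assumption: single-character keys; `register` rebuilds the class with the
# KeyHandler metaclass so that methods marked with @mark become key handlers):
# @register
# class Menu:
#     @mark("q")
#     def quit(cls):
#         return "quit"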
| 64 |
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"""\s+""")


def get_hash(example):
    """simple docstring"""
    return {"hash": hashlib.md5(re.sub(PATTERN , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def line_stats(example):
    """simple docstring"""
    line_lengths = [len(line ) for line in example["""content"""].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats(example):
    """simple docstring"""
    alpha_frac = np.mean([c.isalnum() for c in example["""content"""]] )
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    """simple docstring"""
    if example["hash"] in uniques:
        uniques.remove(example["""hash"""] )
        return True
    else:
        return False
def is_autogenerated(example, scan_width=5):
    """simple docstring"""
    keywords = ["""auto-generated""", """autogenerated""", """automatically generated"""]
    lines = example["""content"""].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """simple docstring"""
    keywords = ["""unit tests""", """test file""", """configuration file"""]
    lines = example["""content"""].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["""content"""].count("""\n""" )
    threshold = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count("""config""" )
        count_test += line.lower().count("""test""" )
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """simple docstring"""
    keywords = ["""def """, """class """, """for """, """while """]
    lines = example["""content"""].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """simple docstring"""
    lines = example["""content"""].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("""=""" )
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example):
    """simple docstring"""
    input_ids = tokenizer(example["""content"""] , truncation=False )["""input_ids"""]
    ratio = len(example["""content"""] ) / len(input_ids )
    return {"ratio": ratio}
def preprocess(example):
    """simple docstring"""
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
    return results
def filter(example, uniques, args):
    """simple docstring"""
    if not check_uniques(example , uniques ):
        return False
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file(file_path):
    """simple docstring"""
    with open(file_path , """rb""" ) as f_in:
        with gzip.open(str(file_path ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(F'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(F'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
uniques = set(ds.unique('''hash'''))
frac = len(uniques) / len(ds)
print(F'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(F'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(F'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
data_dir = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / F'''file-{file_number+1:012}.json''')
    end_index = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'''Time to save dataset: {time.time()-t_start:.2f}''')
| 64 | 1 |
import unittest
from transformers import DonutProcessor
UpperCamelCase__ = """naver-clova-ix/donut-base"""
class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        """simple docstring"""
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME )
    def test_token2json(self):
"""simple docstring"""
        expected_json = {
"name": "John Doe",
"age": "99",
"city": "Atlanta",
"state": "GA",
"zip": "30301",
"phone": "123-4567",
"nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
}
        sequence = (
"<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
"<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
"<s_nicknames><s_nickname>Johnny</s_nickname>"
"<sep/><s_nickname>JD</s_nickname></s_nicknames>"
)
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
| 102 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=1_3,
        image_size=3_0,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=3_2,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=1_0,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """simple docstring"""
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )

    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = TFDeiTModel(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """simple docstring"""
        model = TFDeiTForMaskedImageModeling(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": TFDeiTModel,
            """image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFDeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=3_7 )

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds" )
    def test_inputs_embeds(self):
        """simple docstring"""
        pass

    def test_model_common_attributes(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Dense ) )

    def test_forward_signature(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_image_modeling(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )

    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
                del inputs_dict["labels"]
        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="tf" )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) )
| 102 | 1 |
"""simple docstring"""
def dodecahedron_surface_area(edge: float) -> float:
    if edge <= 0 or not isinstance(edge , int ):
        raise ValueError("""Length must be a positive.""" )
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    if edge <= 0 or not isinstance(edge , int ):
        raise ValueError("""Length must be a positive.""" )
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
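    # Examples (assumption: integer edge lengths, matching the isinstance check above):
    print(dodecahedron_surface_area(5))  # ~516.14
    print(dodecahedron_volume(5))  # ~957.89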
| 347 | """simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowerCamelCase = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    """simple docstring"""
    expressions = test_results.split(' ' )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '=' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    """simple docstring"""
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('\n' ):
        if re.search(r'_ \[doctest\]' , line ):
            in_error = True
            file = line.split(' ' )[2]
        elif in_error and not line.split(' ' )[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
return failures
class Message:
    def __init__(self , title , doc_test_results ) -> None:
        self.title = title
        self._time_spent = doc_test_results['time_spent'].split(',' )[0]
        self.n_success = doc_test_results['success']
        self.n_failures = doc_test_results['failures']
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(':' )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 36_00 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
        return f'''{int(hours )}h{int(minutes )}m{int(seconds )}s'''
@property
    def header(self) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
f''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(v , dict )}
        report = ''
        for category, failures in category_failures.items():
            if len(failures ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(failures )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
    def payload(self) -> str:
        blocks = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__UpperCAmelCase )
@staticmethod
    def error_out() -> None:
        payload = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
]
print('Sending the following payload' )
        print(json.dumps({'blocks': json.loads(payload )} ) )
        client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=payload , )
    def post(self) -> None:
        print('Sending the following payload' )
        print(json.dumps({'blocks': json.loads(self.payload )} ) )
        text = f'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else 'All tests passed.'
        self.thread_ts = client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=text , )
    def get_reply_blocks(self , job_name , job_link , failures , text ) -> list:
        failures_text = ''
        for key, value in failures.items():
            value = value[:2_00] + ' [Truncated]' if len(value ) > 2_50 else value
            failures_text += f'''*{key}*\n_{value}_\n\n'''
        title = job_name
        content = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
        if job_link is not None:
            content['accessory'] = {
                'type': 'button',
                'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
                'url': job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply(self) -> None:
        if self.thread_ts is None:
            raise ValueError('Can only post reply if a post has been made.' )
        job_link = self.doc_test_results.pop('job_link' )
        self.doc_test_results.pop('failures' )
        self.doc_test_results.pop('success' )
        self.doc_test_results.pop('time_spent' )
        sorted_dict = sorted(self.doc_test_results.items() , key=lambda t: t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['failures'] ):
                text = f'''*Num failures* :{len(job_result["failed"] )} \n'''
                failures = job_result['failures']
                blocks = self.get_reply_blocks(job , job_link , failures , text=text )
                print('Sending the following reply' )
                print(json.dumps({'blocks': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=f'''Results for {job}''' , blocks=blocks , thread_ts=self.thread_ts['ts'] , )
                time.sleep(1 )
def get_job_links():
    """simple docstring"""
    run_id = os.environ['GITHUB_RUN_ID']
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'''&page={i + 2}''' ).json()
            jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        return jobs
    except Exception as e:
        print('Unknown error, could not fetch links.' , e )
    return {}
def retrieve_artifact(name):
    """simple docstring"""
    _artifact = {}
    if os.path.exists(name ):
        files = os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name , file ) , encoding='utf-8' ) as f:
                    _artifact[file.split('.' )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F'''Could not open {os.path.join(name , file )}.''' ) from e
    return _artifact
def retrieve_available_artifacts():
    """simple docstring"""

    class Artifact:
        def __init__(self , name ) -> None:
            self.name = name
            self.paths = []

        def __str__(self ) -> str:
            return self.name

        def add_path(self , path ) -> None:
            self.paths.append({'name': self.name, 'path': path} )

    _available_artifacts = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowerCamelCase = github_actions_job_links.get("run_doctests")
__lowerCamelCase = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
__lowerCamelCase = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = handle_test_results(artifact["stats"])
__lowerCamelCase = failed
__lowerCamelCase = success
__lowerCamelCase = time_spent[1:-1] + ", "
__lowerCamelCase = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
__lowerCamelCase = line.replace("FAILED ", "")
__lowerCamelCase = line.split()[0].replace("\n", "")
if "::" in line:
__lowerCamelCase , __lowerCamelCase = line.split("::")
else:
                    file_path, test = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
break
__lowerCamelCase = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 221 | 0 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=['rouge2', 'rougeL'])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=['rouge2'])
    assert (
        pd.DataFrame(no_aggregation['rouge2']).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2['rouge2']).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = 'rougeLsum'
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
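# Why the assertion above holds: "rougeLsum" scores the longest common
# subsequence per sentence, so it needs sentence boundaries. With
# newline_sep=True, calculate_rouge re-inserts "\n" between sentences before
# scoring, which raises the score on multi-sentence CNN/DailyMail-style
# summaries; with newline_sep=False the whole summary counts as one sentence.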
def test_newline_irrelevant_for_other_metrics():
    k = ['rouge1', 'rouge2', 'rougeL']
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        'Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.',
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        'Margot Frank, died in 1945, a month earlier than previously thought.',
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        ' the final seconds on board Flight 9525.',
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=['rougeLsum'], newline_sep=False)['rougeLsum']
    new_score = calculate_rouge(pred, tgt, rouge_keys=['rougeLsum'])['rougeLsum']
    assert new_score > prev_score
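# A hedged usage sketch of calculate_rouge itself (output values are
# illustrative and assume the default rouge_keys; they are not asserted here):
#
#   >>> calculate_rouge(['hello there'], ['hello there'])
#   {'rouge1': 100.0, 'rouge2': 100.0, 'rougeL': 100.0, 'rougeLsum': 100.0}
#
# Identical predictions and references reach the maximum f-measure on every key.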
def test_rouge_cli():
    data_dir = Path('examples/seq2seq/test_data/wmt_en_ro')
    metrics = calculate_rouge_path(data_dir.joinpath('test.source'), data_dir.joinpath('test.target'))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath('test.source'), data_dir.joinpath('test.target'), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
| 33 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_time_series_transformer'] = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
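# Runtime behaviour of the lazy structure above (a sketch, assuming the usual
# `_LazyModule` semantics; nothing below is executed by this module):
#
#   >>> import transformers.models.time_series_transformer as tst
#   >>> tst.TimeSeriesTransformerModel  # first attribute access triggers the import
#
# The heavy `modeling_*` module (and torch itself) is only imported on demand,
# keeping the top-level package import cheap.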
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33 | 1 |
"""simple docstring"""
import operator as op
SCALER_NAME = 'scaler.pt'
MODEL_NAME = 'pytorch_model'
RNG_STATE_NAME = 'random_states'
OPTIMIZER_NAME = 'optimizer'
SCHEDULER_NAME = 'scheduler'
WEIGHTS_NAME = 'pytorch_model.bin'
WEIGHTS_INDEX_NAME = 'pytorch_model.bin.index.json'
SAFE_WEIGHTS_NAME = 'model.safetensors'
SAFE_WEIGHTS_INDEX_NAME = 'model.safetensors.index.json'
SAGEMAKER_PYTORCH_VERSION = '1.10.2'
SAGEMAKER_PYTHON_VERSION = 'py38'
SAGEMAKER_TRANSFORMERS_VERSION = '4.17.0'
SAGEMAKER_PARALLEL_EC2_INSTANCES = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']
FSDP_SHARDING_STRATEGY = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
FSDP_AUTO_WRAP_POLICY = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
FSDP_BACKWARD_PREFETCH = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
FSDP_STATE_DICT_TYPE = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
FSDP_PYTORCH_VERSION = '2.0.1'
DEEPSPEED_MULTINODE_LAUNCHERS = ['pdsh', 'standard', 'openmpi', 'mvapich']
TORCH_DYNAMO_MODES = ['default', 'reduce-overhead', 'max-autotune']
STR_OPERATION_TO_FUNC = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
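# A hedged usage sketch for STR_OPERATION_TO_FUNC (the version strings are
# illustrative only):
#
#   >>> from packaging.version import parse
#   >>> STR_OPERATION_TO_FUNC['>='](parse('2.0.1'), parse('1.10.2'))
#   True
#
# A comparison operator arriving as a string (e.g. from a config) can thus be
# mapped straight onto the matching `operator` function.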
# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
'nnodes',
'nproc_per_node',
'rdzv_backend',
'rdzv_endpoint',
'rdzv_id',
'rdzv_conf',
'standalone',
'max_restarts',
'monitor_interval',
'start_method',
'role',
'module',
'm',
'no_python',
'run_path',
'log_dir',
'r',
'redirects',
't',
'tee',
'node_rank',
'master_addr',
'master_port',
]
CUDA_DISTRIBUTED_TYPES = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
# NOTE: the original name of this constant is not recoverable from the snippet;
# TORCH_DISTRIBUTED_OPERATION_TYPES is a best guess based on its contents.
TORCH_DISTRIBUTED_OPERATION_TYPES = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
| 269 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
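# With the defaults above, the dict handed to the image processor looks like
# (illustrative):
#
#   {'image_mean': [0.5, 0.5, 0.5], 'image_std': [0.5, 0.5, 0.5],
#    'do_normalize': True, 'do_resize': True, 'size': {'height': 18, 'width': 18}}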
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, 'image_mean'))
        self.assertTrue(hasattr(image_processor, 'image_std'))
        self.assertTrue(hasattr(image_processor, 'do_normalize'))
        self.assertTrue(hasattr(image_processor, 'do_resize'))
        self.assertTrue(hasattr(image_processor, 'size'))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ),
        )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ),
        )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ),
        )
| 167 | 0 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode('UTF-8')
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(' '):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = '\n'.join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f': {x}: ' in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    # `from_gh` is a module-level flag set in the `__main__` block below.
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.'
            )

    return selected_warnings
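# Sketch of the text `parse_line` consumes (the warning content is invented for
# illustration):
#
#   tests/test_foo.py::test_bar
#     /usr/lib/python3.8/site-packages/foo.py:1: DeprecationWarning: bar is deprecated
#       warnings.warn("bar is deprecated", DeprecationWarning)
#
# Indented lines are buffered; once an unindented line ends the block, the
# joined text is kept only if it contains f": {target}: " for one of `targets`.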
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith('.zip') or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(',')

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
# optional parameters
parser.add_argument(
"--targets",
default="DeprecationWarning,UserWarning,FutureWarning",
type=list_str,
help="Comma-separated list of target warning(s) which we want to extract.",
)
parser.add_argument(
"--from_gh",
action="store_true",
help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("=" * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 263 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Remove segments from a dotted path. Positive values shave the first segments, negative the last."""
    if n_shave_prefix_segments >= 0:
        return '.'.join(path.split('.')[n_shave_prefix_segments:])
    else:
        return '.'.join(path.split('.')[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Update resnet weight paths to the new (diffusers) naming scheme."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('in_layers.0', 'norm1')
        new_item = new_item.replace('in_layers.2', 'conv1')
        new_item = new_item.replace('out_layers.0', 'norm2')
        new_item = new_item.replace('out_layers.3', 'conv2')
        new_item = new_item.replace('emb_layers.1', 'time_emb_proj')
        new_item = new_item.replace('skip_connection', 'conv_shortcut')
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Update attention weight paths to the new (diffusers) naming scheme."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('norm.weight', 'group_norm.weight')
        new_item = new_item.replace('norm.bias', 'group_norm.bias')
        new_item = new_item.replace('proj_out.weight', 'proj_attn.weight')
        new_item = new_item.replace('proj_out.bias', 'proj_attn.bias')
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping
def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config['num_head_channels'] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map['query']] = query.reshape(target_shape)
            checkpoint[path_map['key']] = key.reshape(target_shape)
            checkpoint[path_map['value']] = value.reshape(target_shape)

    for path in paths:
        new_path = path['new']

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace('middle_block.0', 'mid_block.resnets.0')
        new_path = new_path.replace('middle_block.1', 'mid_block.attentions.0')
        new_path = new_path.replace('middle_block.2', 'mid_block.resnets.1')

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['old'], replacement['new'])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['old']]
def convert_ldm_checkpoint(checkpoint, config):
    """Convert an LDM UNet state dict to the diffusers layout."""
    new_checkpoint = {}

    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']

    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']

    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'input_blocks' in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'middle_block' in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'output_blocks' in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
        for layer_id in range(num_output_blocks)
    }
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config['num_res_blocks'] + 1)
        layer_in_block_id = (i - 1) % (config['num_res_blocks'] + 1)

        resnets = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
        attentions = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]

        if f'input_blocks.{i}.0.op.weight' in checkpoint:
            new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.weight'] = checkpoint[
                f'input_blocks.{i}.0.op.weight'
            ]
            new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.bias'] = checkpoint[
                f'input_blocks.{i}.0.op.bias'
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
        resnet_op = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                'old': f'input_blocks.{i}.1',
                'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
            }
            to_split = {
                f'input_blocks.{i}.1.qkv.bias': {
                    'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
                    'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
                    'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
                },
                f'input_blocks.{i}.1.qkv.weight': {
                    'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
                    'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
                    'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        'middle_block.1.qkv.bias': {
            'key': 'mid_block.attentions.0.key.bias',
            'query': 'mid_block.attentions.0.query.bias',
            'value': 'mid_block.attentions.0.value.bias',
        },
        'middle_block.1.qkv.weight': {
            'key': 'mid_block.attentions.0.key.weight',
            'query': 'mid_block.attentions.0.query.weight',
            'value': 'mid_block.attentions.0.value.weight',
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )
    for i in range(num_output_blocks):
        block_id = i // (config['num_res_blocks'] + 1)
        layer_in_block_id = i % (config['num_res_blocks'] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split('.')[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
            attentions = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(['conv.weight', 'conv.bias'])
                new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.weight'] = checkpoint[
                    f'output_blocks.{i}.{index}.conv.weight'
                ]
                new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.bias'] = checkpoint[
                    f'output_blocks.{i}.{index}.conv.bias'
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    'old': f'output_blocks.{i}.1',
                    'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
                }
                to_split = {
                    f'output_blocks.{i}.1.qkv.bias': {
                        'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
                        'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
                        'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
                    },
                    f'output_blocks.{i}.1.qkv.weight': {
                        'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
                        'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
                        'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any('qkv' in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = '.'.join(['output_blocks', str(i), path['old']])
                new_path = '.'.join(['up_blocks', str(block_id), 'resnets', str(layer_in_block_id), path['new']])
                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
| 263 | 1 |