code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = IFPipeline
SCREAMING_SNAKE_CASE__ : List[str] = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
SCREAMING_SNAKE_CASE__ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ : int = PipelineTesterMixin.required_optional_params - {"""latents"""}
def lowercase_ ( self ) -> str:
return self._get_dummy_components()
def lowercase_ ( self , __lowercase , __lowercase=0 ) -> int:
if str(__lowercase ).startswith('''mps''' ):
lowerCAmelCase_ : Optional[Any] = torch.manual_seed(__lowercase )
else:
lowerCAmelCase_ : Optional[int] = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
lowerCAmelCase_ : Tuple = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self ) -> Dict:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def lowercase_ ( self ) -> Dict:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowercase_ ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowercase_ ( self ) -> List[Any]:
self._test_save_load_local()
def lowercase_ ( self ) -> List[str]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowercase_ ( self ) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> List[Any]:
# if
lowerCAmelCase_ : Optional[Any] = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa )
lowerCAmelCase_ : str = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=__lowercase , tokenizer=__lowercase )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''' )
lowerCAmelCase_ , lowerCAmelCase_ : int = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
lowerCAmelCase_ : Tuple = None
lowerCAmelCase_ : Dict = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__lowercase , __lowercase , __lowercase , __lowercase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
lowerCAmelCase_ : Optional[Any] = IFImgaImgPipeline(**pipe_a.components )
lowerCAmelCase_ : Tuple = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__lowercase , __lowercase , __lowercase , __lowercase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
lowerCAmelCase_ : List[Any] = IFInpaintingPipeline(**pipe_a.components )
lowerCAmelCase_ : Any = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__lowercase , __lowercase , __lowercase , __lowercase )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> List[Any]:
# pipeline 1
_start_torch_memory_measurement()
lowerCAmelCase_ : List[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCAmelCase_ : List[str] = pipe_a(
prompt_embeds=__lowercase , negative_prompt_embeds=__lowercase , num_inference_steps=2 , generator=__lowercase , output_type='''np''' , )
lowerCAmelCase_ : Tuple = output.images[0]
assert image.shape == (6_4, 6_4, 3)
lowerCAmelCase_ : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
lowerCAmelCase_ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
assert_mean_pixel_difference(__lowercase , __lowercase )
# pipeline 2
_start_torch_memory_measurement()
lowerCAmelCase_ : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCAmelCase_ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(__lowercase )
lowerCAmelCase_ : str = pipe_a(
prompt_embeds=__lowercase , negative_prompt_embeds=__lowercase , image=__lowercase , generator=__lowercase , num_inference_steps=2 , output_type='''np''' , )
lowerCAmelCase_ : Optional[int] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
lowerCAmelCase_ : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
lowerCAmelCase_ : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(__lowercase , __lowercase )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> List[str]:
# pipeline 1
_start_torch_memory_measurement()
lowerCAmelCase_ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(__lowercase )
lowerCAmelCase_ : Optional[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCAmelCase_ : List[Any] = pipe_a(
prompt_embeds=__lowercase , negative_prompt_embeds=__lowercase , image=__lowercase , num_inference_steps=2 , generator=__lowercase , output_type='''np''' , )
lowerCAmelCase_ : Union[str, Any] = output.images[0]
assert image.shape == (6_4, 6_4, 3)
lowerCAmelCase_ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
lowerCAmelCase_ : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
assert_mean_pixel_difference(__lowercase , __lowercase )
# pipeline 2
_start_torch_memory_measurement()
lowerCAmelCase_ : int = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(__lowercase )
lowerCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(__lowercase )
lowerCAmelCase_ : List[str] = pipe_a(
prompt_embeds=__lowercase , negative_prompt_embeds=__lowercase , image=__lowercase , original_image=__lowercase , generator=__lowercase , num_inference_steps=2 , output_type='''np''' , )
lowerCAmelCase_ : Any = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
lowerCAmelCase_ : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
lowerCAmelCase_ : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(__lowercase , __lowercase )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase ) -> Union[str, Any]:
# pipeline 1
_start_torch_memory_measurement()
lowerCAmelCase_ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(__lowercase )
lowerCAmelCase_ : List[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(__lowercase )
lowerCAmelCase_ : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCAmelCase_ : Dict = pipe_a(
prompt_embeds=__lowercase , negative_prompt_embeds=__lowercase , image=__lowercase , mask_image=__lowercase , num_inference_steps=2 , generator=__lowercase , output_type='''np''' , )
lowerCAmelCase_ : Dict = output.images[0]
assert image.shape == (6_4, 6_4, 3)
lowerCAmelCase_ : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
lowerCAmelCase_ : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
assert_mean_pixel_difference(__lowercase , __lowercase )
# pipeline 2
_start_torch_memory_measurement()
lowerCAmelCase_ : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCAmelCase_ : List[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(__lowercase )
lowerCAmelCase_ : Any = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(__lowercase )
lowerCAmelCase_ : List[str] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(__lowercase )
lowerCAmelCase_ : Union[str, Any] = pipe_a(
prompt_embeds=__lowercase , negative_prompt_embeds=__lowercase , image=__lowercase , mask_image=__lowercase , original_image=__lowercase , generator=__lowercase , num_inference_steps=2 , output_type='''np''' , )
lowerCAmelCase_ : Dict = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
lowerCAmelCase_ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
lowerCAmelCase_ : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(__lowercase , __lowercase )
def lowerCAmelCase ( )-> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats() | 619 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class snake_case__:
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=2 , __lowercase=True , __lowercase=False , __lowercase=1_0 , __lowercase=3 , __lowercase=3_2 * 4 , __lowercase=3_2 * 6 , __lowercase=4 , __lowercase=3_2 , ) -> Union[str, Any]:
lowerCAmelCase_ : str = parent
lowerCAmelCase_ : Optional[Any] = batch_size
lowerCAmelCase_ : List[Any] = is_training
lowerCAmelCase_ : Optional[Any] = use_auxiliary_loss
lowerCAmelCase_ : List[Any] = num_queries
lowerCAmelCase_ : str = num_channels
lowerCAmelCase_ : Dict = min_size
lowerCAmelCase_ : List[str] = max_size
lowerCAmelCase_ : Any = num_labels
lowerCAmelCase_ : str = mask_feature_size
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__lowercase )
lowerCAmelCase_ : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowercase )
lowerCAmelCase_ : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowercase ) > 0.5
).float()
lowerCAmelCase_ : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=__lowercase ) > 0.5).long()
lowerCAmelCase_ : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase_ ( self ) -> List[str]:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = self.prepare_config_and_inputs()
lowerCAmelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def lowercase_ ( self , __lowercase , __lowercase ) -> Any:
lowerCAmelCase_ : Optional[int] = output.encoder_hidden_states
lowerCAmelCase_ : List[Any] = output.pixel_decoder_hidden_states
lowerCAmelCase_ : Optional[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowercase ) , config.decoder_config.decoder_layers )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase=False ) -> int:
with torch.no_grad():
lowerCAmelCase_ : List[Any] = MaskFormerModel(config=__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : Optional[Any] = model(pixel_values=__lowercase , pixel_mask=__lowercase )
lowerCAmelCase_ : Optional[int] = model(__lowercase , output_hidden_states=__lowercase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__lowercase , __lowercase )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
lowerCAmelCase_ : Any = MaskFormerForInstanceSegmentation(config=__lowercase )
model.to(__lowercase )
model.eval()
def comm_check_on_output(__lowercase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCAmelCase_ : int = model(pixel_values=__lowercase , pixel_mask=__lowercase )
lowerCAmelCase_ : Any = model(__lowercase )
comm_check_on_output(__lowercase )
lowerCAmelCase_ : List[Any] = model(
pixel_values=__lowercase , pixel_mask=__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
comm_check_on_output(__lowercase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : Tuple = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : Dict = False
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : List[str] = False
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Any = MaskFormerModelTester(self )
lowerCAmelCase_ : str = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def lowercase_ ( self ) -> Any:
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ , lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowercase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def lowercase_ ( self ) -> str:
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def lowercase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def lowercase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def lowercase_ ( self ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowercase_ ( self ) -> Optional[Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase_ ( self ) -> Dict:
pass
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Tuple = model_class(__lowercase )
lowerCAmelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : str = [*signature.parameters.keys()]
lowerCAmelCase_ : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
@slow
def lowercase_ ( self ) -> Optional[int]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowerCAmelCase_ : str = MaskFormerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Tuple = (self.model_tester.min_size,) * 2
lowerCAmelCase_ : List[Any] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=__lowercase ),
'''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowercase ),
'''class_labels''': torch.zeros(2 , 1_0 , device=__lowercase ).long(),
}
lowerCAmelCase_ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowercase )
lowerCAmelCase_ : Dict = model(**__lowercase )
self.assertTrue(outputs.loss is not None )
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )
def lowercase_ ( self ) -> int:
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : List[str] = model_class(__lowercase ).to(__lowercase )
lowerCAmelCase_ : int = model(**__lowercase , output_attentions=__lowercase )
self.assertTrue(outputs.attentions is not None )
def lowercase_ ( self ) -> List[str]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowerCAmelCase_ : int = self.all_model_classes[1]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase_ : Optional[Any] = model_class(__lowercase )
model.to(__lowercase )
model.train()
lowerCAmelCase_ : Optional[Any] = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ).loss
loss.backward()
def lowercase_ ( self ) -> Optional[int]:
# only MaskFormerForInstanceSegmentation has the loss
lowerCAmelCase_ : Any = self.all_model_classes[1]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase_ : Tuple = True
lowerCAmelCase_ : Tuple = True
lowerCAmelCase_ : Any = model_class(__lowercase )
model.to(__lowercase )
model.train()
lowerCAmelCase_ : Any = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
lowerCAmelCase_ : Union[str, Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase_ : str = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
lowerCAmelCase_ : str = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase_ : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__lowercase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_UpperCAmelCase : Dict =1E-4
def lowerCAmelCase ( )-> Any:
lowerCAmelCase_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class snake_case__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase_ ( self ) -> Union[str, Any]:
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def lowercase_ ( self ) -> Any:
lowerCAmelCase_ : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowercase )
lowerCAmelCase_ : Dict = self.default_image_processor
lowerCAmelCase_ : int = prepare_img()
lowerCAmelCase_ : Any = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
lowerCAmelCase_ : Any = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
lowerCAmelCase_ : List[str] = model(**__lowercase )
lowerCAmelCase_ : Union[str, Any] = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
lowerCAmelCase_ : List[Any] = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
lowerCAmelCase_ : int = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) )
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowercase )
.eval()
)
lowerCAmelCase_ : Tuple = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : int = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
lowerCAmelCase_ : Tuple = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
lowerCAmelCase_ : Dict = model(**__lowercase )
# masks_queries_logits
lowerCAmelCase_ : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCAmelCase_ : Tuple = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
lowerCAmelCase_ : int = torch.tensor(__lowercase ).to(__lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
# class_queries_logits
lowerCAmelCase_ : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase_ : Dict = torch.tensor(
[
[1.6_512e00, -5.2_572e00, -3.3_519e00],
[3.6_169e-02, -5.9_025e00, -2.9_313e00],
[1.0_766e-04, -7.7_630e00, -5.1_263e00],
] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : str = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(__lowercase )
.eval()
)
lowerCAmelCase_ : int = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : Dict = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
lowerCAmelCase_ : str = model(**__lowercase )
# masks_queries_logits
lowerCAmelCase_ : List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCAmelCase_ : Any = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
lowerCAmelCase_ : str = torch.tensor(__lowercase ).to(__lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
# class_queries_logits
lowerCAmelCase_ : Optional[int] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase_ : int = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowercase )
.eval()
)
lowerCAmelCase_ : str = self.default_image_processor
lowerCAmelCase_ : Union[str, Any] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].to(__lowercase )
lowerCAmelCase_ : int = [el.to(__lowercase ) for el in inputs['''mask_labels''']]
lowerCAmelCase_ : Optional[Any] = [el.to(__lowercase ) for el in inputs['''class_labels''']]
with torch.no_grad():
lowerCAmelCase_ : str = model(**__lowercase )
self.assertTrue(outputs.loss is not None ) | 619 | 1 |
from math import isclose, sqrt
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> tuple[float, float, float]:
lowerCAmelCase_ : Any = point_y / 4 / point_x
lowerCAmelCase_ : str = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
lowerCAmelCase_ : Union[str, Any] = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
lowerCAmelCase_ : Optional[int] = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
lowerCAmelCase_ : Dict = outgoing_gradient**2 + 4
lowerCAmelCase_ : Tuple = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
lowerCAmelCase_ : List[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100
lowerCAmelCase_ : List[str] = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
lowerCAmelCase_ : Tuple = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
lowerCAmelCase_ : Optional[Any] = x_minus if isclose(lowerCAmelCase_ , lowerCAmelCase_ ) else x_plus
lowerCAmelCase_ : List[Any] = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def lowerCAmelCase ( lowerCAmelCase_ = 1.4 , lowerCAmelCase_ = -9.6 )-> int:
lowerCAmelCase_ : int = 0
lowerCAmelCase_ : float = first_x_coord
lowerCAmelCase_ : float = first_y_coord
lowerCAmelCase_ : float = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = next_point(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f"""{solution() = }""") | 619 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase__ )
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = field(default="""automatic-speech-recognition""", metadata={"""include_in_asdict_even_if_is_default""": True} )
SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""audio""": Audio()} )
SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""transcription""": Value("""string""" )} )
SCREAMING_SNAKE_CASE__ : str = "audio"
SCREAMING_SNAKE_CASE__ : str = "transcription"
def lowercase_ ( self , __lowercase ) -> int:
if self.audio_column not in features:
raise ValueError(f"""Column {self.audio_column} is not present in features.""" )
if not isinstance(features[self.audio_column] , __lowercase ):
raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" )
lowerCAmelCase_ : List[str] = copy.deepcopy(self )
lowerCAmelCase_ : Optional[Any] = self.input_schema.copy()
lowerCAmelCase_ : Optional[Any] = features[self.audio_column]
lowerCAmelCase_ : List[str] = input_schema
return task_template
@property
def lowercase_ ( self ) -> Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"} | 619 | 1 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Configuration classes whose docstrings legitimately lack a checkpoint link.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}
def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name whose docstring link matches
    ``https://huggingface.co/<name>``, or None if no valid link is found."""
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing every non-deprecated config class whose
    docstring lacks a valid checkpoint link (unless explicitly ignored)."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
# NOTE(review): every assignment below rebinds the same module-level name
# `_UpperCAmelCase`; the original constant names appear to have been mangled
# (these read like the per-task parameter sets from a diffusers
# `pipeline_params.py`) — TODO confirm before relying on any but the last
# binding. Values and binding order are preserved exactly.
_UpperCAmelCase : int = frozenset(
    {"prompt", "height", "width", "guidance_scale", "negative_prompt",
     "prompt_embeds", "negative_prompt_embeds", "cross_attention_kwargs"}
)
_UpperCAmelCase : List[Any] = frozenset({"prompt", "negative_prompt"})
_UpperCAmelCase : Dict = frozenset()
_UpperCAmelCase : int = frozenset({"image"})
_UpperCAmelCase : Tuple = frozenset({"image", "height", "width", "guidance_scale"})
_UpperCAmelCase : int = frozenset({"image"})
_UpperCAmelCase : str = frozenset(
    {"prompt", "image", "height", "width", "guidance_scale",
     "negative_prompt", "prompt_embeds", "negative_prompt_embeds"}
)
_UpperCAmelCase : int = frozenset({"prompt", "image", "negative_prompt"})
# Text guided image variation with an image mask
_UpperCAmelCase : Optional[int] = frozenset(
    {"prompt", "image", "mask_image", "height", "width", "guidance_scale",
     "negative_prompt", "prompt_embeds", "negative_prompt_embeds"}
)
_UpperCAmelCase : Optional[int] = frozenset({"prompt", "image", "mask_image", "negative_prompt"})
# image variation with an image mask
_UpperCAmelCase : Optional[Any] = frozenset({"image", "mask_image", "height", "width", "guidance_scale"})
_UpperCAmelCase : Optional[Any] = frozenset({"image", "mask_image"})
_UpperCAmelCase : Union[str, Any] = frozenset(
    {"example_image", "image", "mask_image", "height", "width", "guidance_scale"}
)
_UpperCAmelCase : Tuple = frozenset({"example_image", "image", "mask_image"})
_UpperCAmelCase : Any = frozenset({"class_labels"})
_UpperCAmelCase : List[Any] = frozenset({"class_labels"})
_UpperCAmelCase : int = frozenset({"batch_size"})
_UpperCAmelCase : str = frozenset()
_UpperCAmelCase : str = frozenset({"batch_size"})
_UpperCAmelCase : Optional[Any] = frozenset()
_UpperCAmelCase : Tuple = frozenset(
    {"prompt", "audio_length_in_s", "guidance_scale", "negative_prompt",
     "prompt_embeds", "negative_prompt_embeds", "cross_attention_kwargs"}
)
_UpperCAmelCase : Tuple = frozenset({"prompt", "negative_prompt"})
_UpperCAmelCase : List[str] = frozenset({"input_tokens"})
_UpperCAmelCase : Optional[Any] = frozenset({"input_tokens"})
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffolding: submodules are imported only on first attribute
# access (or eagerly under TYPE_CHECKING), keeping `import transformers` cheap.
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

# The image processor needs the vision extras (PIL); register only if present.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

# Modeling code needs torch; register only if present.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
    import sys

    # Replace this module object with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def solution(lowerCAmelCase_=1_000_000) -> int:
    """Project Euler 14: return the starting number below the given limit
    whose Collatz chain (n -> n/2 if even, 3n+1 if odd, down to 1) is longest.

    Chain lengths are memoized in ``counters`` (number of terms including the
    terminal 1), so each tail is only ever computed once.
    """
    largest_number = 1
    longest_chain = 1
    counters = {1: 1}  # start value -> chain length in terms
    for start in range(2, lowerCAmelCase_):
        counter = 0
        number = start
        while True:
            if number in counters:
                # Reuse the already-computed tail length.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
            else:
                number = (3 * number) + 1
            counter += 1
        if start not in counters:
            counters[start] = counter
        if counter > longest_chain:
            largest_number = start
            longest_chain = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single line, padded/truncated to `max_length`.

    BART's tokenizer needs `add_prefix_space=True` for lines that do not
    already start with a space; `padding_side` is set on the tokenizer
    before encoding (a deliberate side effect).
    """
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Drop columns of `input_ids` (and `attention_mask`, if given) that are
    entirely padding, shrinking the batch to its longest real sequence."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class snake_case__( UpperCAmelCase__ ):
    """Seq2seq dataset reading parallel `<type_path>.source` /
    `<type_path>.target` line files and yielding tokenized tensors."""

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, TaTokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        """Character length of every line (used as a cheap length proxy)."""
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        """Stack a list of examples and trim columns that are all padding."""
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        return {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
logger = getLogger(__name__)  # read below by `set_extra_model_params`


def flatten_list(summary_ids):
    """Flatten a list of lists into a single flat list."""
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path) -> None:
    """Write git repository metadata to `<folder_path>/git_log.json`."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path, indent=4, **json_dump_kwargs):
    """Serialize `content` as JSON to `path`."""
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    """Deserialize JSON content from `path`."""
    with open(path) as f:
        return json.load(f)
def get_git_info():
    """Collect repo id, commit sha, branch and hostname for experiment logs."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f, x):
    """Shorthand for ``list(map(f, x))``."""
    return list(map(f, x))
def pickle_save(obj, path):
    """Pickle `obj` to `path`."""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lowercase, strip punctuation and articles, squash whitespace
    (SQuAD-style answer normalization)."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    """Token-level F1 between the normalized prediction and ground truth.

    Returns 0 (int) when the token multisets have no overlap.
    """
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    # Multiset intersection: per-token minimum count on each side.
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def exact_match_score(prediction, ground_truth):
    """True iff prediction and ground truth are equal after normalization."""
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns, reference_lns):
    """Mean exact-match score over paired output/reference lines.

    Returns:
        dict: ``{"em": score}`` where score is in [0, 1] (0 for empty input).
    """
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    """True for RAG model name prefixes ("rag_sequence", "rag_token", ...)."""
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    """Move each attribute in `extra_params` from `hparams` onto `config`.

    T5-style configs use `dropout_rate` instead of `dropout`; parameters the
    config does not know under either name are dropped with a log message.
    Both `hparams` and `config` are mutated and returned.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class snake_case__( UpperCAmelCase__ ):
    """Composite configuration holding an `encoder` and a `decoder` sub-config."""

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        # Imported lazily to avoid a circular import with the auto configs.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        """Build a composite config from two sub-configs, marking the decoder
        as a cross-attending decoder first."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Module logger (name appears mangled; presumably `logger` originally — TODO confirm).
_UpperCAmelCase : List[Any] =logging.get_logger(__name__)
# PIL is an optional dependency; only import it when the vision extras are installed.
if is_vision_available():
    import PIL
class snake_case__( UpperCAmelCase__ ):
    """CLIP-style image processor: optionally convert to RGB, then
    resize -> center-crop -> rescale -> normalize."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        """Resize so the shortest edge matches size["shortest_edge"]."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        """Crop to size["height"] x size["width"] about the image center."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        """Normalize with per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the configured pipeline over one image or a batch; every step
        can be overridden per call, defaulting to the instance settings."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
from __future__ import annotations
from math import pi
def lowerCAmelCase(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Solve for the one zeroed quantity among inductance (H), frequency (Hz)
    and inductive reactance (ohm), using X_L = 2*pi*f*L.

    Exactly one argument must be 0; the result is returned as a one-entry dict
    keyed by the solved quantity.

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is negative.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
    """Tokenizer test suite for LXMERT (slow and fast tokenizers)."""

    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        # Write a tiny WordPiece vocab into the mixin-provided temp dir.
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 1_0, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
_UpperCAmelCase : Tuple = logging.get_logger(__name__)  # module logger (original name presumably `logger` — unused in the visible code)


class SchedulerType( UpperCAmelCase__ ):
    """Names of the supported learning-rate schedules (values are the
    user-facing string identifiers)."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer, last_epoch=-1):
    """LambdaLR that keeps the optimizer's learning rate constant (factor 1)."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1):
    """Constant learning rate after a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer, step_rules, last_epoch=-1):
    """Piecewise-constant LR multiplier.

    `step_rules` like "1:10,3:0.1,0.01" means: factor 10 until step 1,
    factor 0.1 until step 3, then factor 0.01 for the rest of training.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup to the base LR, then linear decay to 0 at
    `num_training_steps` (clamped at 0 afterwards)."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
    """Linear warmup, then cosine decay to 0 over the remaining steps
    (`num_cycles=0.5` gives a single half-cosine from 1 to 0)."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1
):
    """Linear warmup, then cosine decay with `num_cycles` hard restarts
    (the factor jumps back to 1 at the start of each cycle)."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup, then polynomial decay from the optimizer's initial LR
    down to `lr_end` by `num_training_steps` (then held at `lr_end`).

    Raises:
        ValueError: if `lr_end` is not smaller than the optimizer's initial LR.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
_UpperCAmelCase : Union[str, Any] ={
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1.0 , lowerCAmelCase_ = -1 , )-> Optional[int]:
lowerCAmelCase_ : Union[str, Any] = SchedulerType(lowerCAmelCase_ )
lowerCAmelCase_ : Optional[int] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCAmelCase_ , step_rules=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , num_cycles=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , power=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ , )
return schedule_func(
lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ ) | 619 | 1 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
_UpperCAmelCase : Optional[int] =HfArgumentParser(InitializationArguments)
_UpperCAmelCase : Optional[int] =parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
_UpperCAmelCase : Union[str, Any] =AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
_UpperCAmelCase : Union[str, Any] ={
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
_UpperCAmelCase : str =AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
_UpperCAmelCase : int =AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub) | 619 |
from __future__ import annotations
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )-> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 | 1 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
_UpperCAmelCase : Optional[Any] =logging.get_logger(__name__) # pylint: disable=invalid-name
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(lowerCAmelCase_ ):
return ext
raise Exception(
f"""Unable to determine file format from file extension {path}. """
f"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def lowerCAmelCase ( lowerCAmelCase_ )-> Tuple:
lowerCAmelCase_ : str = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
lowerCAmelCase_ : Dict = try_infer_format_from_ext(args.input ) if args.format == '''infer''' else args.format
lowerCAmelCase_ : List[str] = PipelineDataFormat.from_str(
format=lowerCAmelCase_ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(lowerCAmelCase_ , lowerCAmelCase_ )
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase ) -> List[Any]:
lowerCAmelCase_ : str = nlp
lowerCAmelCase_ : Any = reader
@staticmethod
def lowercase_ ( __lowercase ) -> Optional[int]:
lowerCAmelCase_ : Union[str, Any] = parser.add_parser('''run''' , help='''Run a pipeline through the CLI''' )
run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''' )
run_parser.add_argument('''--input''' , type=__lowercase , help='''Path to the file to use for inference''' )
run_parser.add_argument('''--output''' , type=__lowercase , help='''Path to the file that will be used post to write results.''' )
run_parser.add_argument('''--model''' , type=__lowercase , help='''Name or path to the model to instantiate.''' )
run_parser.add_argument('''--config''' , type=__lowercase , help='''Name or path to the model\'s config to instantiate.''' )
run_parser.add_argument(
'''--tokenizer''' , type=__lowercase , help='''Name of the tokenizer to use. (default: same as the model name)''' )
run_parser.add_argument(
'''--column''' , type=__lowercase , help='''Name of the column to use as input. (For multi columns input as QA use column1,columns2)''' , )
run_parser.add_argument(
'''--format''' , type=__lowercase , default='''infer''' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='''Input format to read from''' , )
run_parser.add_argument(
'''--device''' , type=__lowercase , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''' )
run_parser.set_defaults(func=__lowercase )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ , lowerCAmelCase_ : int = self._nlp, []
for entry in self._reader:
lowerCAmelCase_ : List[Any] = nlp(**__lowercase ) if self._reader.is_multi_columns else nlp(__lowercase )
if isinstance(__lowercase , __lowercase ):
outputs.append(__lowercase )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
lowerCAmelCase_ : Tuple = self._reader.save_binary(__lowercase )
logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
else:
self._reader.save(__lowercase ) | 619 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_UpperCAmelCase : Any ="""src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_UpperCAmelCase : Optional[Any] =direct_transformers_import(PATH_TO_TRANSFORMERS)
_UpperCAmelCase : List[str] =transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_UpperCAmelCase : Dict =re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
_UpperCAmelCase : Any ={
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
lowerCAmelCase_ : Any = None
# source code of `config_class`
lowerCAmelCase_ : Optional[int] = inspect.getsource(lowerCAmelCase_ )
lowerCAmelCase_ : str = _re_checkpoint.findall(lowerCAmelCase_ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
lowerCAmelCase_ : Optional[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
lowerCAmelCase_ : Tuple = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
lowerCAmelCase_ : List[str] = ckpt_name
break
return checkpoint
def lowerCAmelCase ( )-> Optional[Any]:
lowerCAmelCase_ : Tuple = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
lowerCAmelCase_ : int = get_checkpoint_from_config_class(lowerCAmelCase_ )
lowerCAmelCase_ : Tuple = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
lowerCAmelCase_ : List[Any] = '''\n'''.join(sorted(lowerCAmelCase_ ) )
raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints() | 619 | 1 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
_UpperCAmelCase : Any ={
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def lowerCAmelCase ( lowerCAmelCase_ )-> Dict:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
lowerCAmelCase_ : Union[str, Any] = list(s_dict.keys() )
for key in keys:
lowerCAmelCase_ : Optional[Any] = r'''.*/layers_(\d+)'''
lowerCAmelCase_ : int = key
if re.match(lowerCAmelCase_ , lowerCAmelCase_ ):
lowerCAmelCase_ : str = re.sub(r'''layers_(\d+)''' , r'''block/\1/layer''' , lowerCAmelCase_ )
lowerCAmelCase_ : List[Any] = r'''(encoder|decoder)\/'''
if re.match(lowerCAmelCase_ , lowerCAmelCase_ ):
lowerCAmelCase_ : int = re.match(lowerCAmelCase_ , lowerCAmelCase_ ).groups()
if groups[0] == "encoder":
lowerCAmelCase_ : Dict = re.sub(r'''/mlp/''' , r'''/1/mlp/''' , lowerCAmelCase_ )
lowerCAmelCase_ : List[Any] = re.sub(r'''/pre_mlp_layer_norm/''' , r'''/1/layer_norm/''' , lowerCAmelCase_ )
elif groups[0] == "decoder":
lowerCAmelCase_ : Optional[Any] = re.sub(r'''/mlp/''' , r'''/2/mlp/''' , lowerCAmelCase_ )
lowerCAmelCase_ : str = re.sub(r'''/pre_mlp_layer_norm/''' , r'''/2/layer_norm/''' , lowerCAmelCase_ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
lowerCAmelCase_ : Optional[Any] = new_key.replace(lowerCAmelCase_ , lowerCAmelCase_ )
print(f"""{key} -> {new_key}""" )
lowerCAmelCase_ : Dict = s_dict.pop(lowerCAmelCase_ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCAmelCase_ : List[str] = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCAmelCase_ : Tuple = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
lowerCAmelCase_ : Tuple = s_dict[key].shape[0]
lowerCAmelCase_ : Union[str, Any] = s_dict[key]
for idx in range(lowerCAmelCase_ ):
lowerCAmelCase_ : Union[str, Any] = expert_weihts[idx]
print(f"""{key} -> {key.replace("expert/" , "nested fstring" )}""" )
s_dict.pop(lowerCAmelCase_ )
return s_dict
_UpperCAmelCase : Dict ={
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Union[str, Any]:
# Convert a google style config to the hugging face fromat
import regex as re
with open(lowerCAmelCase_ , '''r''' ) as f:
lowerCAmelCase_ : Any = f.read()
lowerCAmelCase_ : Union[str, Any] = re.findall(r'''(.*) = ([0-9.]*)''' , lowerCAmelCase_ )
lowerCAmelCase_ : int = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
lowerCAmelCase_ : Any = float(lowerCAmelCase_ ) if '''.''' in value else int(lowerCAmelCase_ )
lowerCAmelCase_ : int = re.findall(r'''(.*activations) = \(\'(.*)\',\)''' , lowerCAmelCase_ )[0]
lowerCAmelCase_ : Tuple = str(activation[1] )
lowerCAmelCase_ : Union[str, Any] = num_experts
lowerCAmelCase_ : Optional[int] = SwitchTransformersConfig(**lowerCAmelCase_ )
return config
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_="./" , lowerCAmelCase_=8 )-> Tuple:
# Initialise PyTorch model
print(f"""Loading flax weights from : {flax_checkpoint_path}""" )
lowerCAmelCase_ : Optional[int] = checkpoints.load_tax_checkpoint(lowerCAmelCase_ )
if gin_file is not None:
lowerCAmelCase_ : Optional[int] = convert_gin_to_config(lowerCAmelCase_ , lowerCAmelCase_ )
else:
lowerCAmelCase_ : Optional[int] = SwitchTransformersConfig.from_pretrained(lowerCAmelCase_ )
lowerCAmelCase_ : Union[str, Any] = SwitchTransformersForConditionalGeneration(lowerCAmelCase_ )
lowerCAmelCase_ : List[str] = flax_params['''target''']
lowerCAmelCase_ : Union[str, Any] = flatten_dict(lowerCAmelCase_ , sep='''/''' )
lowerCAmelCase_ : str = rename_keys(lowerCAmelCase_ )
lowerCAmelCase_ : Tuple = unflatten_dict(lowerCAmelCase_ , sep='''/''' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowerCAmelCase_ , lowerCAmelCase_ )
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
_UpperCAmelCase : Any =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
_UpperCAmelCase : Optional[Any] =parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
) | 619 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class snake_case__:
'''simple docstring'''
def __init__( self , __lowercase , __lowercase , __lowercase = True , __lowercase = False ) -> Tuple:
lowerCAmelCase_ : Optional[int] = scheduler
lowerCAmelCase_ : Dict = optimizers if isinstance(__lowercase , (list, tuple) ) else [optimizers]
lowerCAmelCase_ : str = split_batches
lowerCAmelCase_ : Any = step_with_optimizer
lowerCAmelCase_ : Optional[Any] = GradientState()
def lowercase_ ( self , *__lowercase , **__lowercase ) -> Any:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*__lowercase , **__lowercase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*__lowercase , **__lowercase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
lowerCAmelCase_ : Optional[Any] = AcceleratorState().num_processes
for _ in range(__lowercase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*__lowercase , **__lowercase )
else:
self.scheduler.step(*__lowercase , **__lowercase )
def lowercase_ ( self ) -> Union[str, Any]:
return self.scheduler.get_last_lr()
def lowercase_ ( self ) -> List[str]:
return self.scheduler.state_dict()
def lowercase_ ( self , __lowercase ) -> int:
self.scheduler.load_state_dict(__lowercase )
def lowercase_ ( self ) -> Tuple:
return self.scheduler.get_lr()
def lowercase_ ( self , *__lowercase , **__lowercase ) -> int:
return self.scheduler.print_lr(*__lowercase , **__lowercase ) | 619 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : int =logging.get_logger(__name__)
_UpperCAmelCase : List[Any] ={
"""microsoft/biogpt""": """https://huggingface.co/microsoft/biogpt/resolve/main/config.json""",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = """biogpt"""
def __init__( self , __lowercase=4_2_3_8_4 , __lowercase=1_0_2_4 , __lowercase=2_4 , __lowercase=1_6 , __lowercase=4_0_9_6 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=1_0_2_4 , __lowercase=0.02 , __lowercase=1e-12 , __lowercase=True , __lowercase=True , __lowercase=0.0 , __lowercase=0.0 , __lowercase=1 , __lowercase=0 , __lowercase=2 , **__lowercase , ) -> str:
lowerCAmelCase_ : Any = vocab_size
lowerCAmelCase_ : int = max_position_embeddings
lowerCAmelCase_ : Union[str, Any] = hidden_size
lowerCAmelCase_ : int = num_hidden_layers
lowerCAmelCase_ : Any = num_attention_heads
lowerCAmelCase_ : int = intermediate_size
lowerCAmelCase_ : int = hidden_act
lowerCAmelCase_ : int = hidden_dropout_prob
lowerCAmelCase_ : Dict = attention_probs_dropout_prob
lowerCAmelCase_ : Optional[int] = initializer_range
lowerCAmelCase_ : Union[str, Any] = layer_norm_eps
lowerCAmelCase_ : int = scale_embedding
lowerCAmelCase_ : List[Any] = use_cache
lowerCAmelCase_ : Dict = layerdrop
lowerCAmelCase_ : Tuple = activation_dropout
super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) | 619 |
from manim import *
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
def lowercase_ ( self ) -> Tuple:
lowerCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase_ : Tuple = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : int = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Tuple = Text('''CPU''' , font_size=2_4 )
lowerCAmelCase_ : Union[str, Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowercase )
lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4 )]
lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : List[Any] = Text('''GPU''' , font_size=2_4 )
lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
gpu.move_to([-1, -1, 0] )
self.add(__lowercase )
lowerCAmelCase_ : str = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Dict = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Dict = Text('''Model''' , font_size=2_4 )
lowerCAmelCase_ : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
model.move_to([3, -1.0, 0] )
self.add(__lowercase )
lowerCAmelCase_ : int = []
lowerCAmelCase_ : int = []
lowerCAmelCase_ : Dict = []
for i, rect in enumerate(__lowercase ):
rect.set_stroke(__lowercase )
lowerCAmelCase_ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__lowercase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowercase , buff=0.0 )
self.add(__lowercase )
model_cpu_arr.append(__lowercase )
self.add(*__lowercase , *__lowercase , *__lowercase )
lowerCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Union[str, Any] = Text('''Loaded Checkpoint''' , font_size=2_4 )
lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__lowercase )
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Dict = []
for i, rect in enumerate(__lowercase ):
lowerCAmelCase_ : str = fill.copy().set_fill(__lowercase , opacity=0.7 )
target.move_to(__lowercase )
ckpt_arr.append(__lowercase )
lowerCAmelCase_ : Union[str, Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__lowercase )
self.add(*__lowercase , *__lowercase )
lowerCAmelCase_ : Union[str, Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase_ : str = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowercase , __lowercase )
lowerCAmelCase_ : str = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowercase )
lowerCAmelCase_ : str = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
lowerCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase_ : Any = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : List[str] = Text('''Disk''' , font_size=2_4 )
lowerCAmelCase_ : Optional[int] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__lowercase , run_time=3 ) , Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) )
lowerCAmelCase_ : int = []
for i, rect in enumerate(__lowercase ):
lowerCAmelCase_ : int = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
self.play(*__lowercase )
self.play(FadeOut(__lowercase ) )
lowerCAmelCase_ : Union[str, Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase , run_time=3 ) )
self.play(
FadeOut(__lowercase , __lowercase , *__lowercase , *__lowercase ) , )
self.wait() | 619 | 1 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
_UpperCAmelCase : List[Any] ="""
import os
"""
_UpperCAmelCase : Tuple ="""
def foo():
import os
return False
"""
_UpperCAmelCase : List[Any] ="""
def foo():
def bar():
if True:
import os
return False
return bar()
"""
_UpperCAmelCase : Optional[Any] ="""
import os
try:
import bar
except ImportError:
raise ValueError()
"""
_UpperCAmelCase : Union[str, Any] ="""
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
_UpperCAmelCase : Optional[Any] ="""
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
_UpperCAmelCase : str ="""
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
_UpperCAmelCase : Any ="""
import os
try:
import bar
except:
raise ValueError()
"""
_UpperCAmelCase : List[Any] ="""
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
_UpperCAmelCase : Union[str, Any] ="""
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
_UpperCAmelCase : Union[str, Any] =[
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> str:
lowerCAmelCase_ : Dict = os.path.join(lowerCAmelCase_ , '''test_file.py''' )
with open(lowerCAmelCase_ , '''w''' ) as _tmp_file:
_tmp_file.write(lowerCAmelCase_ )
lowerCAmelCase_ : Dict = get_imports(lowerCAmelCase_ )
assert parsed_imports == ["os"] | 619 |
_UpperCAmelCase : Dict =[
(1000, """M"""),
(900, """CM"""),
(500, """D"""),
(400, """CD"""),
(100, """C"""),
(90, """XC"""),
(50, """L"""),
(40, """XL"""),
(10, """X"""),
(9, """IX"""),
(5, """V"""),
(4, """IV"""),
(1, """I"""),
]
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
lowerCAmelCase_ : Any = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1_000}
lowerCAmelCase_ : Optional[int] = 0
lowerCAmelCase_ : List[str] = 0
while place < len(lowerCAmelCase_ ):
if (place + 1 < len(lowerCAmelCase_ )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
lowerCAmelCase_ : List[Any] = []
for arabic, roman in ROMAN:
((lowerCAmelCase_) , (lowerCAmelCase_)) : Optional[int] = divmod(lowerCAmelCase_ , lowerCAmelCase_ )
result.append(roman * factor )
if number == 0:
break
return "".join(lowerCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class snake_case__:
    """Builds tiny MaskFormer configs and random inputs for the unit tests.

    NOTE(review): identifiers in this file were machine-obfuscated.  Every
    helper method below is named ``lowercase_`` (later definitions shadow the
    earlier ones at class-creation time) and several bodies read names
    (``parent``, ``pixel_values``, ``config`` ...) that have no visible
    binding — verify against the original ``MaskFormerModelTester`` before
    relying on behavior.
    """

    def __init__( self , __lowercase , __lowercase=2 , __lowercase=True , __lowercase=False , __lowercase=1_0 , __lowercase=3 , __lowercase=3_2 * 4 , __lowercase=3_2 * 6 , __lowercase=4 , __lowercase=3_2 , ) -> Union[str, Any]:
        # Record the harness and the (deliberately tiny) model hyper-parameters.
        # NOTE(review): the right-hand names are unbound here (see class note).
        lowerCAmelCase_ : str = parent
        lowerCAmelCase_ : Optional[Any] = batch_size
        lowerCAmelCase_ : List[Any] = is_training
        lowerCAmelCase_ : Optional[Any] = use_auxiliary_loss
        lowerCAmelCase_ : List[Any] = num_queries
        lowerCAmelCase_ : str = num_channels
        lowerCAmelCase_ : Dict = min_size
        lowerCAmelCase_ : List[str] = max_size
        lowerCAmelCase_ : Any = num_labels
        lowerCAmelCase_ : str = mask_feature_size

    def lowercase_ ( self ) -> List[Any]:
        # Random pixel values, an all-ones pixel mask, and binary mask / class
        # labels, all created on the target device, plus a fresh config.
        lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            __lowercase )
        lowerCAmelCase_ : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowercase )
        lowerCAmelCase_ : Union[str, Any] = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowercase ) > 0.5
        ).float()
        lowerCAmelCase_ : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=__lowercase ) > 0.5).long()
        lowerCAmelCase_ : Dict = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def lowercase_ ( self ) -> List[str]:
        # Tiny Swin backbone + tiny DETR decoder so the tests stay fast.
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )

    def lowercase_ ( self ) -> Union[str, Any]:
        # Same inputs as above but packed into the kwargs dict the common
        # tests expect.
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = self.prepare_config_and_inputs()
        lowerCAmelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
        return config, inputs_dict

    def lowercase_ ( self , __lowercase , __lowercase ) -> Any:
        # Checks the number of hidden states of each sub-module against the
        # config.  NOTE(review): ``assertTrue(len(x), y)`` only tests the
        # truthiness of the first argument (the second is the msg) —
        # ``assertEqual`` was almost certainly intended here.
        lowerCAmelCase_ : Optional[int] = output.encoder_hidden_states
        lowerCAmelCase_ : List[Any] = output.pixel_decoder_hidden_states
        lowerCAmelCase_ : Optional[Any] = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowercase ) , config.decoder_config.decoder_layers )

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase=False ) -> int:
        # Runs a bare MaskFormerModel forward pass (no head) and checks the
        # shapes of the returned last hidden states.
        with torch.no_grad():
            lowerCAmelCase_ : List[Any] = MaskFormerModel(config=__lowercase )
            model.to(__lowercase )
            model.eval()
            lowerCAmelCase_ : Optional[Any] = model(pixel_values=__lowercase , pixel_mask=__lowercase )
            lowerCAmelCase_ : Optional[int] = model(__lowercase , output_hidden_states=__lowercase )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(__lowercase , __lowercase )

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
        # Runs the instance-segmentation head with and without labels and
        # checks the logits shapes plus, when labels are given, the loss.
        lowerCAmelCase_ : Any = MaskFormerForInstanceSegmentation(config=__lowercase )
        model.to(__lowercase )
        model.eval()

        def comm_check_on_output(__lowercase ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            lowerCAmelCase_ : int = model(pixel_values=__lowercase , pixel_mask=__lowercase )
            lowerCAmelCase_ : Any = model(__lowercase )
        comm_check_on_output(__lowercase )
        lowerCAmelCase_ : List[Any] = model(
            pixel_values=__lowercase , pixel_mask=__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
        comm_check_on_output(__lowercase )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    """Common-API test suite for MaskFormer.

    NOTE(review): machine obfuscation renamed every test method to
    ``lowercase_``; within a class body later definitions shadow earlier
    ones, so only the last method actually exists at runtime.  Several
    bodies also read unbound names (``config_class=__lowercase`` etc.).
    Verify against the original ``MaskFormerModelTest`` before trusting
    this suite's coverage.
    """

    # Model classes / pipeline mapping exercised by the common tests.
    SCREAMING_SNAKE_CASE__ : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE__ : Tuple = (
        {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    # Feature flags consumed by the shared ModelTesterMixin machinery.
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : Dict = False
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : List[str] = False

    def lowercase_ ( self ) -> List[Any]:
        # Set up the model tester and the config tester.
        lowerCAmelCase_ : Any = MaskFormerModelTester(self )
        lowerCAmelCase_ : str = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )

    def lowercase_ ( self ) -> Any:
        self.config_tester.run_common_tests()

    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ , lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )

    def lowercase_ ( self ) -> Optional[Any]:
        lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowercase )

    @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
    def lowercase_ ( self ) -> str:
        pass

    @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass

    @unittest.skip(reason='''MaskFormer is not a generative model''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass

    @unittest.skip(reason='''MaskFormer does not use token embeddings''' )
    def lowercase_ ( self ) -> Union[str, Any]:
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def lowercase_ ( self ) -> Dict:
        pass

    def lowercase_ ( self ) -> List[str]:
        # The forward signature must start with ``pixel_values``.
        lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : Tuple = model_class(__lowercase )
            lowerCAmelCase_ : Dict = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase_ : str = [*signature.parameters.keys()]
            lowerCAmelCase_ : Tuple = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __lowercase )

    @slow
    def lowercase_ ( self ) -> Optional[int]:
        # Smoke-test loading the released checkpoint from the Hub.
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            lowerCAmelCase_ : str = MaskFormerModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )

    def lowercase_ ( self ) -> List[Any]:
        # A forward pass with labels must produce a loss.
        lowerCAmelCase_ : Tuple = (self.model_tester.min_size,) * 2
        lowerCAmelCase_ : List[Any] = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=__lowercase ),
            '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowercase ),
            '''class_labels''': torch.zeros(2 , 1_0 , device=__lowercase ).long(),
        }
        lowerCAmelCase_ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowercase )
        lowerCAmelCase_ : Dict = model(**__lowercase )
        self.assertTrue(outputs.loss is not None )

    def lowercase_ ( self ) -> Dict:
        lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )

    def lowercase_ ( self ) -> int:
        # output_attentions=True must populate ``outputs.attentions``.
        lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : List[str] = model_class(__lowercase ).to(__lowercase )
            lowerCAmelCase_ : int = model(**__lowercase , output_attentions=__lowercase )
            self.assertTrue(outputs.attentions is not None )

    def lowercase_ ( self ) -> List[str]:
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase_ : int = self.all_model_classes[1]
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ : Optional[Any] = model_class(__lowercase )
        model.to(__lowercase )
        model.train()
        lowerCAmelCase_ : Optional[Any] = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ).loss
        loss.backward()

    def lowercase_ ( self ) -> Optional[int]:
        # Gradient-retention test: every intermediate activation must receive
        # a gradient when backpropagating the segmentation loss.
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase_ : Any = self.all_model_classes[1]
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ : Tuple = True
        lowerCAmelCase_ : Tuple = True
        lowerCAmelCase_ : Any = model_class(__lowercase )
        model.to(__lowercase )
        model.train()
        lowerCAmelCase_ : Any = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
        lowerCAmelCase_ : Union[str, Any] = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        lowerCAmelCase_ : str = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        lowerCAmelCase_ : str = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        lowerCAmelCase_ : Union[str, Any] = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=__lowercase )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
_UpperCAmelCase : Dict =1E-4
def lowerCAmelCase ( )-> Any:
    """Return the standard COCO sample image (two cats) used by the slow tests."""
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
@require_vision
@slow
class snake_case__( unittest.TestCase ):
    """Slow integration tests comparing MaskFormer checkpoint outputs against
    hard-coded reference slices.

    NOTE(review): the method names were machine-obfuscated to ``lowercase_``;
    later definitions shadow earlier ones, so only the last method of that
    name actually runs.  Verify against the original
    ``MaskFormerModelIntegrationTest``.
    """

    @cached_property
    def lowercase_ ( self ) -> Union[str, Any]:
        # Image processor for the Swin-small COCO checkpoint (None when the
        # vision extras are not installed).
        return (
            MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            if is_vision_available()
            else None
        )

    def lowercase_ ( self ) -> Any:
        # Backbone-only forward pass: compare 3x3 slices of the encoder,
        # pixel-decoder and transformer-decoder last hidden states.
        lowerCAmelCase_ : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowercase )
        lowerCAmelCase_ : Dict = self.default_image_processor
        lowerCAmelCase_ : int = prepare_img()
        lowerCAmelCase_ : Any = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Any = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : List[str] = model(**__lowercase )
        lowerCAmelCase_ : Union[str, Any] = torch.tensor(
            [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        lowerCAmelCase_ : List[Any] = torch.tensor(
            [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        lowerCAmelCase_ : int = torch.tensor(
            [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) )

    def lowercase_ ( self ) -> Dict:
        # Instance-segmentation head (Swin-small COCO): verify mask / class
        # logits shapes plus reference 3x3 slices.
        lowerCAmelCase_ : Optional[Any] = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : Tuple = self.default_image_processor
        lowerCAmelCase_ : Optional[Any] = prepare_img()
        lowerCAmelCase_ : int = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Tuple = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : Dict = model(**__lowercase )
        # masks_queries_logits
        lowerCAmelCase_ : Optional[int] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowerCAmelCase_ : Tuple = [
            [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
            [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
            [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
        ]
        lowerCAmelCase_ : int = torch.tensor(__lowercase ).to(__lowercase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        # class_queries_logits
        lowerCAmelCase_ : List[Any] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCAmelCase_ : Dict = torch.tensor(
            [
                [1.6_512e00, -5.2_572e00, -3.3_519e00],
                [3.6_169e-02, -5.9_025e00, -2.9_313e00],
                [1.0_766e-04, -7.7_630e00, -5.1_263e00],
            ] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )

    def lowercase_ ( self ) -> Optional[Any]:
        # Same head-level checks for the ResNet-101 COCO-stuff checkpoint.
        lowerCAmelCase_ : str = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : int = self.default_image_processor
        lowerCAmelCase_ : Optional[Any] = prepare_img()
        lowerCAmelCase_ : Dict = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : str = model(**__lowercase )
        # masks_queries_logits
        lowerCAmelCase_ : List[str] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowerCAmelCase_ : Any = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
        lowerCAmelCase_ : str = torch.tensor(__lowercase ).to(__lowercase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        # class_queries_logits
        lowerCAmelCase_ : Optional[int] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCAmelCase_ : int = torch.tensor(
            [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )

    def lowercase_ ( self ) -> Optional[Any]:
        # Forward pass with segmentation maps / labels must produce a loss.
        lowerCAmelCase_ : Dict = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : str = self.default_image_processor
        lowerCAmelCase_ : Union[str, Any] = image_processor(
            [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
        lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].to(__lowercase )
        lowerCAmelCase_ : int = [el.to(__lowercase ) for el in inputs['''mask_labels''']]
        lowerCAmelCase_ : Optional[Any] = [el.to(__lowercase ) for el in inputs['''class_labels''']]
        with torch.no_grad():
            lowerCAmelCase_ : str = model(**__lowercase )
        self.assertTrue(outputs.loss is not None )
import csv
import tweepy
# Twitter API credentials
_UpperCAmelCase : int =""""""
_UpperCAmelCase : Optional[int] =""""""
_UpperCAmelCase : Dict =""""""
_UpperCAmelCase : str =""""""
def lowerCAmelCase ( lowerCAmelCase_ )-> None:
    """Download the most recent tweets of a screen name (the API caps this at
    roughly 3200) and write them to ``new_<screen_name>_tweets.csv``.

    :param lowerCAmelCase_: the Twitter screen name to download.

    Fixes vs. the obfuscated original: the f-strings referenced an unbound
    ``screen_name``; the paging loop tested the ever-growing accumulator list
    (never terminating) instead of the last fetched page; the csv file is now
    opened with ``newline=""`` as the csv module requires.

    NOTE(review): the obfuscation collapsed the four API-credential constants
    into one name, so the OAuth handler below is fed the screen name as key,
    secret and token — restore the real credentials before running.
    """
    screen_name = lowerCAmelCase_
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(screen_name , screen_name )
    auth.set_access_token(screen_name , screen_name )
    api = tweepy.API(auth )
    # all fetched tweepy Tweet objects accumulate here
    alltweets = []
    # initial request for the most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=200 )
    alltweets.extend(new_tweets )
    # id of the oldest tweet fetched so far, minus one (max_id is inclusive)
    oldest = alltweets[-1].id - 1
    # keep paging backwards until a request returns no tweets
    while len(new_tweets ) > 0:
        print(f"""getting tweets before {oldest}""" )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=200 , max_id=oldest )
        alltweets.extend(new_tweets )
        if new_tweets:
            # update the id of the oldest tweet less one
            oldest = alltweets[-1].id - 1
        print(f"""...{len(alltweets )} tweets downloaded so far""" )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # newline="" prevents csv.writer from emitting blank rows on Windows
    with open(f"""new_{screen_name}_tweets.csv""" , '''w''' , newline='''''' ) as f:
        writer = csv.writer(f )
        writer.writerow(['''id''', '''created_at''', '''text'''] )
        writer.writerows(outtweets )


if __name__ == "__main__":
    # pass in the username of the account you want to download
    # (the original guard called an undefined name ``get_all_tweets``)
    lowerCAmelCase("""FirePing32""" )
def lowerCAmelCase ( )-> List[Any]:
    """Project Euler 40: product of the digits d1*d10*d100*...*d1000000 of
    Champernowne's constant 0.123456789101112...; the answer is 210.

    The original loop appended one million whole *numbers* (~6.9 million
    digits); we now stop as soon as one million *digits* are available.
    """
    chunks = []
    total_digits = 0
    i = 1
    while total_digits < 1_000_000:
        piece = str(i )
        chunks.append(piece )
        total_digits += len(piece )
        i += 1
    constant = ''''''.join(chunks )
    return (
        int(constant[0] )
        * int(constant[9] )
        * int(constant[99] )
        * int(constant[999] )
        * int(constant[9_999] )
        * int(constant[99_999] )
        * int(constant[999_999] )
    )


if __name__ == "__main__":
    # the original guard called an undefined name ``solution``
    print(lowerCAmelCase())
from math import sqrt
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
    """Trial-division primality test.

    :param lowerCAmelCase_: non-negative integer to test.
    :return: True iff the number is prime (0 and 1 are not prime).

    The obfuscated original evaluated ``isinstance(number, number)`` (a
    TypeError) and read the unbound name ``number``; the intended
    precondition and loop are restored here.
    """
    number = lowerCAmelCase_
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2 , int(round(sqrt(number ) ) ) + 1 ):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status , bool ), "'status' must been from type bool"
    return status
def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]:
    """Return all primes in [2, n] using the sieve of Eratosthenes.

    :param lowerCAmelCase_: upper bound n (> 2).
    :return: ascending list of primes <= n.

    The obfuscated original read unbound names (``n``, ``begin_list``) and
    implemented an O(n^2) double loop; this is the classic
    O(n log log n) sieve producing the same list.
    """
    n = lowerCAmelCase_
    assert isinstance(n , int ) and (n > 2), "'N' must been an int and > 2"
    is_prime_flag = [True] * (n + 1)
    ans = []
    for candidate in range(2 , n + 1 ):
        if is_prime_flag[candidate]:
            ans.append(candidate )
            # strike out every multiple of the newly found prime
            for multiple in range(candidate * candidate , n + 1 , candidate ):
                is_prime_flag[multiple] = False
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
    """Collect every prime in [2, n] by repeated primality testing.

    NOTE(review): the body calls ``is_prime`` and reads ``n``/``ans``, none
    of which exist under this file's obfuscated identifiers — restore from
    the original ``get_prime_numbers(n)`` before use.
    """
    assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n > 2), "'N' must been an int and > 2"
    lowerCAmelCase_ : List[str] = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2 , n + 1 ):
        if is_prime(lowerCAmelCase_ ):
            ans.append(lowerCAmelCase_ )
    # precondition
    assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list"
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> Optional[Any]:
    """Build the prime factorization of ``number`` as a list of factors.

    NOTE(review): the body calls the non-existent ``is_prime`` and reads
    unbound names (``number``, ``quotient``, ``factor``), and
    ``quotient /= factor`` degrades the quotient to float — restore from the
    original ``prime_factorization(number)`` before use.
    """
    assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and number >= 0, "'number' must been an int and >= 0"
    lowerCAmelCase_ : Union[str, Any] = []  # this list will be returns of the function.
    # potential prime number factors.
    lowerCAmelCase_ : Any = 2
    lowerCAmelCase_ : List[str] = number
    if number == 0 or number == 1:
        ans.append(lowerCAmelCase_ )
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(lowerCAmelCase_ ):
        while quotient != 1:
            if is_prime(lowerCAmelCase_ ) and (quotient % factor == 0):
                ans.append(lowerCAmelCase_ )
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(lowerCAmelCase_ )
    # precondition
    assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list"
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]:
    """Return the largest prime factor of ``number``.

    NOTE(review): calls the non-existent ``prime_factorization`` and reads
    unbound names — restore from the original
    ``greatest_prime_factor(number)`` before use.
    """
    assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    lowerCAmelCase_ : Union[str, Any] = 0
    # prime factorization of 'number'
    lowerCAmelCase_ : Optional[int] = prime_factorization(lowerCAmelCase_ )
    lowerCAmelCase_ : Dict = max(lowerCAmelCase_ )
    # precondition
    assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type int"
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
    """Return the smallest prime factor of ``number``.

    NOTE(review): calls the non-existent ``prime_factorization`` and reads
    unbound names — restore from the original
    ``smallest_prime_factor(number)`` before use.
    """
    assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    lowerCAmelCase_ : Union[str, Any] = 0
    # prime factorization of 'number'
    lowerCAmelCase_ : int = prime_factorization(lowerCAmelCase_ )
    lowerCAmelCase_ : Union[str, Any] = min(lowerCAmelCase_ )
    # precondition
    assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type int"
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> Dict:
    """Return True iff the argument is an even integer.

    The obfuscated original called ``isinstance(x, x)`` (a TypeError) and
    read the unbound name ``number``; the intended check is restored.
    """
    assert isinstance(lowerCAmelCase_ , int ), "'number' must been an int"
    return lowerCAmelCase_ % 2 == 0
def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]:
    """Return True iff the argument is an odd integer.

    The obfuscated original called ``isinstance(x, x)`` (a TypeError) and
    read the unbound name ``number``; the intended check is restored.
    """
    assert isinstance(lowerCAmelCase_ , int ), "'number' must been an int"
    return lowerCAmelCase_ % 2 != 0
def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]:
    """Return two primes whose sum equals the given even number > 2 (Goldbach).

    NOTE(review): the body calls non-existent helpers (``is_even``,
    ``get_prime_numbers``, ``is_prime``) and reads unbound names
    (``prime_numbers``, ``len_pn``, ``i``, ``j``, ``loop``) — restore from
    the original ``goldbach(number)`` before use.
    """
    assert (
        isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (number > 2) and is_even(lowerCAmelCase_ )
    ), "'number' must been an int, even and > 2"
    lowerCAmelCase_ : Union[str, Any] = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    lowerCAmelCase_ : List[Any] = get_prime_numbers(lowerCAmelCase_ )
    lowerCAmelCase_ : Any = len(lowerCAmelCase_ )
    # run variable for while-loops.
    lowerCAmelCase_ : List[Any] = 0
    lowerCAmelCase_ : List[Any] = None
    # exit variable. for break up the loops
    lowerCAmelCase_ : int = True
    while i < len_pn and loop:
        lowerCAmelCase_ : Tuple = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                lowerCAmelCase_ : Union[str, Any] = False
                ans.append(prime_numbers[i] )
                ans.append(prime_numbers[j] )
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
        and (len(lowerCAmelCase_ ) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0] )
        and is_prime(ans[1] )
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def lowerCAmelCase ( numbera , numberb )-> Optional[int]:
    """Greatest common divisor of two non-negative integers (Euclid).

    :param numbera: first non-negative integer.
    :param numberb: second non-negative integer.
    :return: gcd(numbera, numberb); gcd(0, 0) == 0.

    The obfuscated original declared both parameters with the same name (a
    SyntaxError) and its loop read unbound names (``numbera``, ``rest``);
    the classic iterative Euclidean algorithm is restored with distinct
    parameter names.
    """
    assert (
        isinstance(numbera , int )
        and isinstance(numberb , int )
        and (numbera >= 0)
        and (numberb >= 0)
    ), "'number1' and 'number2' must been positive integer."
    while numberb != 0:
        # one Euclid step leaves gcd(numbera, numberb) unchanged
        numbera , numberb = numberb , numbera % numberb
    # precondition
    assert isinstance(numbera , int ) and (
        numbera >= 0
    ), "'number' must been from type int and positive"
    return numbera
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Any:
    """Least common multiple (kgV) built from both prime factorizations.

    NOTE(review): the obfuscation declared both parameters with the same
    name (a SyntaxError) and the body calls the non-existent
    ``prime_factorization`` and reads unbound names (``numbera``,
    ``prime_fac_a``, ``done`` ...) — restore from the original
    ``kg_v(number1, number2)`` before use.
    """
    assert (
        isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
        and isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
        and (numbera >= 1)
        and (numbera >= 1)
    ), "'number1' and 'number2' must been positive integer."
    lowerCAmelCase_ : Dict = 1  # actual answer that will be return.
    # for kgV (x,1)
    if numbera > 1 and numbera > 1:
        # builds the prime factorization of 'number1' and 'number2'
        lowerCAmelCase_ : Tuple = prime_factorization(lowerCAmelCase_ )
        lowerCAmelCase_ : str = prime_factorization(lowerCAmelCase_ )
    elif numbera == 1 or numbera == 1:
        lowerCAmelCase_ : List[Any] = []
        lowerCAmelCase_ : List[Any] = []
        lowerCAmelCase_ : str = max(lowerCAmelCase_ , lowerCAmelCase_ )
    lowerCAmelCase_ : str = 0
    lowerCAmelCase_ : List[str] = 0
    lowerCAmelCase_ : Any = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_a:
                lowerCAmelCase_ : str = prime_fac_a.count(lowerCAmelCase_ )
                lowerCAmelCase_ : List[str] = prime_fac_a.count(lowerCAmelCase_ )
                for _ in range(max(lowerCAmelCase_ , lowerCAmelCase_ ) ):
                    ans *= n
            else:
                lowerCAmelCase_ : Dict = prime_fac_a.count(lowerCAmelCase_ )
                for _ in range(lowerCAmelCase_ ):
                    ans *= n
            done.append(lowerCAmelCase_ )
    # iterates through primeFac2
    for n in prime_fac_a:
        if n not in done:
            lowerCAmelCase_ : Optional[int] = prime_fac_a.count(lowerCAmelCase_ )
            for _ in range(lowerCAmelCase_ ):
                ans *= n
            done.append(lowerCAmelCase_ )
    # precondition
    assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> Dict:
    """Return the n-th prime number (0-based: the 0th prime is 2).

    NOTE(review): the body reads unbound names (``n``, ``index``, ``ans``)
    and calls the non-existent ``is_prime`` — restore from the original
    ``get_prime(n)`` before use.
    """
    assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'number' must been a positive int"
    lowerCAmelCase_ : Optional[int] = 0
    lowerCAmelCase_ : Optional[int] = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(lowerCAmelCase_ ):
            ans += 1
    # precondition
    assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and is_prime(
        lowerCAmelCase_ ), "'ans' must been a prime number and from type int"
    return ans
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Dict:
    """Return all primes strictly between two primes p1 < p2.

    NOTE(review): the obfuscation declared both parameters with the same
    name (a SyntaxError) and the body reads unbound names (``p_number_a``,
    ``number``) and calls the non-existent ``is_prime`` — restore from the
    original ``get_primes_between(p_number_1, p_number_2)`` before use.
    """
    assert (
        is_prime(lowerCAmelCase_ ) and is_prime(lowerCAmelCase_ ) and (p_number_a < p_number_a)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    lowerCAmelCase_ : str = p_number_a + 1  # jump to the next number
    lowerCAmelCase_ : Dict = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(lowerCAmelCase_ ):
        number += 1
    while number < p_number_a:
        ans.append(lowerCAmelCase_ )
        number += 1
        # fetch the next prime number.
        while not is_prime(lowerCAmelCase_ ):
            number += 1
    # precondition
    assert (
        isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
        and ans[0] != p_number_a
        and ans[len(lowerCAmelCase_ ) - 1] != p_number_a
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]:
    """Return every divisor of ``n`` (1 and n included) in ascending order.

    :param lowerCAmelCase_: positive integer n (>= 1).
    :return: ascending list of all divisors of n.

    The obfuscated original read the unbound name ``n`` and appended the
    wrong variable; the parameter binding and append target are restored.
    """
    n = lowerCAmelCase_
    assert isinstance(n , int ) and (n >= 1), "'n' must been int and >= 1"
    ans = [divisor for divisor in range(1 , n + 1 ) if n % divisor == 0]
    # precondition from the original: the first divisor is 1, the last is n
    assert ans[0] == 1 and ans[len(ans ) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]:
    """Return True iff ``number`` equals the sum of its proper divisors.

    NOTE(review): the body calls the non-existent ``get_divisors`` and reads
    unbound names (``number``, ``divisors``) — restore from the original
    ``is_perfect_number(number)`` before use.
    """
    assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
        number > 1
    ), "'number' must been an int and >= 1"
    lowerCAmelCase_ : Tuple = get_divisors(lowerCAmelCase_ )
    # precondition
    assert (
        isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
        and (divisors[0] == 1)
        and (divisors[len(lowerCAmelCase_ ) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1] ) == number
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Any:
    """Reduce numerator/denominator by their gcd; returns the reduced pair.

    NOTE(review): the obfuscation declared both parameters with the same
    name (a SyntaxError) and the body reads ``numerator``/``denominator``
    and calls the non-existent ``gcd`` — restore from the original
    ``simplify_fraction(numerator, denominator)`` before use.
    """
    assert (
        isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
        and isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    lowerCAmelCase_ : List[str] = gcd(abs(lowerCAmelCase_ ) , abs(lowerCAmelCase_ ) )
    # precondition
    assert (
        isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]:
    """Return n! (with 0! == 1).

    :param lowerCAmelCase_: non-negative integer n.
    :return: the factorial of n.

    The obfuscated original read the unbound name ``n``; the parameter
    binding is restored.
    """
    n = lowerCAmelCase_
    assert isinstance(n , int ) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1 , n + 1 ):
        ans *= factor
    return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> Tuple:
    """Return the n-th Fibonacci number in this module's convention:
    fib(0) == fib(1) == 1, fib(2) == 2, fib(3) == 3, fib(4) == 5, ...

    :param lowerCAmelCase_: non-negative index n.
    :return: the corresponding Fibonacci number.

    The obfuscated original read the unbound name ``n``; the parameter
    binding is restored and the temp-variable shuffle is written as a
    tuple swap.
    """
    n = lowerCAmelCase_
    assert isinstance(n , int ) and (n >= 0), "'n' must been an int and >= 0"
    previous = 1
    ans = 1  # this will be return
    for _ in range(n - 1 ):
        previous , ans = ans , ans + previous
    return ans
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
_UpperCAmelCase : Union[str, Any] =logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] ={
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
_UpperCAmelCase : Optional[int] =[
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
# Copies one fairseq checkpoint tensor onto the matching attribute of the
# HF module pointed at by the dotted ``key``.
# NOTE(review): the obfuscation gave all five parameters the same name (a
# SyntaxError) and degraded the ``hf_pointer.<weight>.data = value`` writes
# into plain local assignments — restore from the original
# ``set_recursively(hf_pointer, key, value, full_name, weight_type)``.
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[Any]:
    # walk the dotted ``key`` down from the root module
    for attribute in key.split('''.''' ):
        lowerCAmelCase_ : int = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
    # the checkpoint tensor must match the shape of the target parameter
    if weight_type is not None:
        lowerCAmelCase_ : Optional[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape
    else:
        lowerCAmelCase_ : Union[str, Any] = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    # dispatch on which sub-tensor of the module is being written
    if weight_type == "weight":
        lowerCAmelCase_ : Optional[int] = value
    elif weight_type == "weight_g":
        lowerCAmelCase_ : int = value
    elif weight_type == "weight_v":
        lowerCAmelCase_ : Optional[Any] = value
    elif weight_type == "bias":
        lowerCAmelCase_ : Dict = value
    else:
        lowerCAmelCase_ : Union[str, Any] = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase ( fairseq_model , hf_model )-> None:
    """Map every tensor in ``fairseq_model``'s state dict onto ``hf_model``.

    Conv feature-extractor weights go through ``load_conv_layer``; everything
    else is matched against ``MAPPING`` and written via ``set_recursively``.
    Keys that match nothing are collected and reported.

    Fixes vs. the previous revision: both parameters shared one name
    (``SyntaxError``), every local was mangled into ``lowerCAmelCase_`` (so
    ``name``/``is_used``/``weight_type`` were never bound), and the layer
    index was extracted by splitting on an undefined local instead of ``key``.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The layer index sits just before the matched key,
                        # e.g. "encoder.layers.7.self_attn..." -> "7".
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )


# The conversion entry point below calls this by its original name.
recursively_load_weights = lowerCAmelCase
def lowerCAmelCase ( full_name , value , feature_extractor , unused_weights , use_group_norm )-> None:
    """Load one conv feature-extractor tensor into the HF feature extractor.

    ``full_name`` encodes ``conv_layers.<layer_id>.<type_id>...``; type 0 is
    the convolution itself, type 2 a layer norm (only present on every layer
    without group norm, or on layer 0 with group norm).  Unrecognized keys are
    appended to ``unused_weights``.

    Fixes vs. the previous revision: all five parameters shared one name
    (``SyntaxError``); tensors were assigned to throwaway locals instead of
    the extractor's ``.data``; the assert messages printed
    ``feature_extractor[layer_id]`` instead of
    ``feature_extractor.conv_layers[layer_id]``.
    """
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )


# The weight-loading loop above calls this by its original name.
load_conv_layer = lowerCAmelCase
@torch.no_grad()
def lowerCAmelCase ( checkpoint_path , pytorch_dump_folder_path , config_path=None )-> None:
    """Convert a fairseq WavLM checkpoint into a HF ``WavLMModel`` on disk.

    Fixes vs. the previous revision: duplicate parameter names (a
    ``SyntaxError``) and every intermediate object clobbered into a single
    mangled local, so ``model``/``config``/``hf_wavlm`` were never bound.

    Args:
        checkpoint_path: path to the fairseq ``.pt`` checkpoint.
        pytorch_dump_folder_path: output directory for the HF model.
        config_path: optional path to an existing HF ``config.json``.
    """
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path )
    cfg = WavLMConfigOrig(checkpoint['''cfg'''] )
    model = WavLMOrig(cfg )
    model.load_state_dict(checkpoint['''model'''] )
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path )
    else:
        # No config supplied: fall back to the default WavLM configuration.
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config )
    recursively_load_weights(model , hf_wavlm )
    hf_wavlm.save_pretrained(pytorch_dump_folder_path )


# The __main__ block below calls this by its original name.
convert_wavlm_checkpoint = lowerCAmelCase
# CLI entry point: convert a fairseq WavLM checkpoint into the HF format.
if __name__ == "__main__":
    # NOTE(review): ``argparse`` is not imported anywhere in this chunk —
    # confirm the import exists at the top of the full file.
    _UpperCAmelCase : List[str] =argparse.ArgumentParser()
    # NOTE(review): the parser is bound to a mangled name but read back as
    # ``parser``/``args`` below; those names are undefined in this chunk.
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    _UpperCAmelCase : List[Any] =parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
_UpperCAmelCase : Tuple =10
def lowerCAmelCase ( left , right , array , target )-> int:
    """Linearly scan ``array[left:right]`` for ``target``.

    Returns the index of the first match, or ``-1`` when ``target`` does not
    occur in the half-open range ``[left, right)``.

    Fix vs. the previous revision: all four parameters were declared with the
    same name, which is a ``SyntaxError``; they are restored here from the
    call sites (``lin_search(left, right, array, target)``).
    """
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1


# The ternary-search routines below call this by its original name.
lin_search = lowerCAmelCase
def lowerCAmelCase ( array , target )-> int:
    """Iterative ternary search over a sorted ``array``; returns index or -1.

    Splits ``[left, right]`` at two pivots each pass; once the window is
    narrower than the module-level ``precision``, falls back to a linear scan.

    Fixes vs. the previous revision: both parameters shared one name
    (``SyntaxError``); boundary updates were written to throwaway locals so
    the loop never narrowed; ``right`` started at ``len(array)``, letting
    ``array[two_third]`` index one past the end.
    """
    left = 0
    right = len(array ) - 1
    while left <= right:
        if right - left < precision:
            # Window is small enough: hand off to the linear fallback.
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            # Target lies strictly between the two pivots.
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


ite_ternary_search = lowerCAmelCase
def lowerCAmelCase ( left , right , array , target )-> int:
    """Recursive ternary search for ``target`` in sorted ``array[left:right+1]``.

    Recurses into whichever of the three sub-ranges can contain ``target``;
    once the window is narrower than the module-level ``precision`` it falls
    back to a linear scan.  Returns the index, or ``-1`` when absent.

    Fix vs. the previous revision: all four parameters shared one name, which
    is a ``SyntaxError``; restored from the call site
    ``rec_ternary_search(0, len(collection) - 1, collection, target)``.
    """
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1


# Bound before the first call, so the recursion above resolves through it.
rec_ternary_search = lowerCAmelCase
# Interactive driver: read a sorted list and a target, run both searches.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): each expression result is bound to ``_UpperCAmelCase`` but
    # read back under other names (``user_input``, ``collection``, ``target``,
    # ``resulta``) that are never defined in this chunk — confirm against the
    # full file before running.
    _UpperCAmelCase : Tuple =input("""Enter numbers separated by comma:\n""").strip()
    _UpperCAmelCase : Union[str, Any] =[int(item.strip()) for item in user_input.split(""",""")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    _UpperCAmelCase : int =int(input("""Enter the number to be found in the list:\n""").strip())
    _UpperCAmelCase : Optional[Any] =ite_ternary_search(collection, target)
    _UpperCAmelCase : List[str] =rec_ternary_search(0, len(collection) - 1, collection, target)
    if resulta != -1:
        print(f"""Iterative search: {target} found at positions: {resulta}""")
        print(f"""Recursive search: {target} found at positions: {resulta}""")
    else:
        print("""Not found""")
_UpperCAmelCase : List[str] =6_5521
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
lowerCAmelCase_ : Any = 1
lowerCAmelCase_ : str = 0
for plain_chr in plain_text:
lowerCAmelCase_ : Dict = (a + ord(lowerCAmelCase_ )) % MOD_ADLER
lowerCAmelCase_ : int = (b + a) % MOD_ADLER
return (b << 16) | a | 619 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import machinery for the LLaMA sub-package: build ``_import_structure``
# conditionally on the available backends, then install a ``_LazyModule`` in
# ``sys.modules`` so symbols are imported only on first access.
#
# Fixes vs. the previous revision: every conditional result clobbered a single
# mangled name, ``_import_structure`` (read by ``_LazyModule``) was never
# defined, and the lazy module was never installed in ``sys.modules``.
_import_structure = {
    """configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_llama"""] = ["""LlamaTokenizer"""]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_llama_fast"""] = ["""LlamaTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_llama"""] = [
        """LlamaForCausalLM""",
        """LlamaModel""",
        """LlamaPreTrainedModel""",
        """LlamaForSequenceClassification""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at run time they are lazy.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import machinery for the Falcon sub-package (same pattern as the other
# model packages): build ``_import_structure`` conditionally, then install a
# ``_LazyModule`` in ``sys.modules``.
#
# Fixes vs. the previous revision: the conditional results clobbered a single
# mangled name, ``_import_structure`` was never defined, and the lazy module
# was never installed in ``sys.modules``.
_import_structure = {
    """configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_falcon"""] = [
        """FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """FalconForCausalLM""",
        """FalconModel""",
        """FalconPreTrainedModel""",
        """FalconForSequenceClassification""",
        """FalconForTokenClassification""",
        """FalconForQuestionAnswering""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at run time they are lazy.
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
# Module-level boolean flag bound to a mangled name; nothing in this chunk
# reads it — NOTE(review): confirm its original name/consumer in the full file.
_UpperCAmelCase : Any =False
class snake_case__( unittest.TestCase ):
    '''Fast CPU smoke tests for ``VQDiffusionPipeline`` built from tiny dummy components.

    NOTE(review): this file is machine-mangled — every method is named
    ``lowercase_`` (each def shadows the previous one) and locals are bound to
    ``lowerCAmelCase_`` but read back under their original names (``model``,
    ``tokenizer``, ``pipe``, ``image`` ...), which are undefined here.  Restore
    the original identifiers before relying on these tests.
    '''
    # Teardown hook: free GPU memory between tests.
    def lowercase_ ( self ) -> Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    # Referenced below as ``self.num_embed`` (VQ codebook size).
    @property
    def lowercase_ ( self ) -> Union[str, Any]:
        return 1_2
    # Referenced below as ``self.num_embeds_ada_norm``.
    @property
    def lowercase_ ( self ) -> Any:
        return 1_2
    # Referenced below as ``self.text_embedder_hidden_size``.
    @property
    def lowercase_ ( self ) -> Optional[Any]:
        return 3_2
    # Referenced below as ``self.dummy_vqvae``: tiny seeded VQModel.
    @property
    def lowercase_ ( self ) -> int:
        torch.manual_seed(0 )
        lowerCAmelCase_ : Any = VQModel(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model
    # Referenced below as ``self.dummy_tokenizer``.
    @property
    def lowercase_ ( self ) -> Dict:
        lowerCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer
    # Referenced below as ``self.dummy_text_encoder``: tiny seeded CLIP text model.
    @property
    def lowercase_ ( self ) -> int:
        torch.manual_seed(0 )
        lowerCAmelCase_ : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        return CLIPTextModel(__lowercase )
    # Referenced below as ``self.dummy_transformer``: tiny seeded 2D transformer.
    @property
    def lowercase_ ( self ) -> List[str]:
        torch.manual_seed(0 )
        lowerCAmelCase_ : Union[str, Any] = 1_2
        lowerCAmelCase_ : int = 1_2
        lowerCAmelCase_ : Union[str, Any] = {
            '''attention_bias''': True,
            '''cross_attention_dim''': 3_2,
            '''attention_head_dim''': height * width,
            '''num_attention_heads''': 1,
            '''num_vector_embeds''': self.num_embed,
            '''num_embeds_ada_norm''': self.num_embeds_ada_norm,
            '''norm_num_groups''': 3_2,
            '''sample_size''': width,
            '''activation_fn''': '''geglu-approximate''',
        }
        lowerCAmelCase_ : List[str] = TransformeraDModel(**__lowercase )
        return model
    # End-to-end CPU run with learnable=False classifier-free embeddings;
    # checks output shape and a fixed 3x3 pixel slice.
    def lowercase_ ( self ) -> str:
        lowerCAmelCase_ : List[Any] = '''cpu'''
        lowerCAmelCase_ : Any = self.dummy_vqvae
        lowerCAmelCase_ : str = self.dummy_text_encoder
        lowerCAmelCase_ : Union[str, Any] = self.dummy_tokenizer
        lowerCAmelCase_ : int = self.dummy_transformer
        lowerCAmelCase_ : List[str] = VQDiffusionScheduler(self.num_embed )
        lowerCAmelCase_ : Union[str, Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=__lowercase )
        lowerCAmelCase_ : Dict = VQDiffusionPipeline(
            vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
        lowerCAmelCase_ : int = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowerCAmelCase_ : Any = '''teddy bear playing in the pool'''
        lowerCAmelCase_ : int = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Tuple = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' )
        lowerCAmelCase_ : Union[str, Any] = output.images
        lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : List[Any] = pipe(
            [prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0]
        lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
        lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 2_4, 2_4, 3)
        lowerCAmelCase_ : Optional[int] = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    # Same run but with learnable classifier-free sampling embeddings.
    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ : Optional[Any] = '''cpu'''
        lowerCAmelCase_ : str = self.dummy_vqvae
        lowerCAmelCase_ : Dict = self.dummy_text_encoder
        lowerCAmelCase_ : List[Any] = self.dummy_tokenizer
        lowerCAmelCase_ : Union[str, Any] = self.dummy_transformer
        lowerCAmelCase_ : Tuple = VQDiffusionScheduler(self.num_embed )
        lowerCAmelCase_ : str = LearnedClassifierFreeSamplingEmbeddings(
            learnable=__lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
        lowerCAmelCase_ : List[str] = VQDiffusionPipeline(
            vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
        lowerCAmelCase_ : Union[str, Any] = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowerCAmelCase_ : Any = '''teddy bear playing in the pool'''
        lowerCAmelCase_ : List[str] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Dict = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' )
        lowerCAmelCase_ : str = output.images
        lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Union[str, Any] = pipe(
            [prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0]
        lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
        lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 2_4, 2_4, 3)
        lowerCAmelCase_ : Union[str, Any] = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class snake_case__( unittest.TestCase ):
    '''Slow GPU integration test for the pretrained ``microsoft/vq-diffusion-ithq`` pipeline.

    NOTE(review): machine-mangled like the class above — locals are bound to
    ``lowerCAmelCase_`` but read back as ``pipeline``/``output``/
    ``expected_image``, which are undefined in this chunk.
    '''
    # Teardown hook: free GPU memory between tests.
    def lowercase_ ( self ) -> Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    # Full pipeline run compared against a reference image from the hub.
    def lowercase_ ( self ) -> int:
        lowerCAmelCase_ : Tuple = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
        lowerCAmelCase_ : str = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
        lowerCAmelCase_ : List[Any] = pipeline.to(__lowercase )
        pipeline.set_progress_bar_config(disable=__lowercase )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Optional[int] = pipeline(
            '''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=__lowercase , output_type='''np''' , )
        lowerCAmelCase_ : Union[str, Any] = output.images[0]
        assert image.shape == (2_5_6, 2_5_6, 3)
        assert np.abs(expected_image - image ).max() < 2.0
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class snake_case__( UpperCAmelCase__ ):
    '''Unit tests for ``EulerDiscreteScheduler`` (config sweeps plus full denoising loops).

    NOTE(review): machine-mangled — both class attributes share the name
    ``SCREAMING_SNAKE_CASE__`` (the second shadows the first), every method is
    named ``lowercase_``, and locals are bound to ``lowerCAmelCase_`` but read
    back under their original names (``config``, ``scheduler``, ``sample`` ...),
    which are undefined here.
    '''
    # Scheduler classes under test; the second attribute was presumably
    # ``num_inference_steps`` before mangling — confirm against upstream.
    SCREAMING_SNAKE_CASE__ : int = (EulerDiscreteScheduler,)
    SCREAMING_SNAKE_CASE__ : Optional[Any] = 10
    # Build a default scheduler config, overridable via keyword arguments.
    def lowercase_ ( self , **__lowercase ) -> Tuple:
        lowerCAmelCase_ : Optional[Any] = {
            '''num_train_timesteps''': 1_1_0_0,
            '''beta_start''': 0.00_01,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }
        config.update(**__lowercase )
        return config
    # Sweep over training-timestep counts.
    def lowercase_ ( self ) -> Union[str, Any]:
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=__lowercase )
    # Sweep over beta start/end pairs.
    def lowercase_ ( self ) -> str:
        for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
            self.check_over_configs(beta_start=__lowercase , beta_end=__lowercase )
    # Sweep over beta schedules.
    def lowercase_ ( self ) -> List[str]:
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=__lowercase )
    # Sweep over prediction types.
    def lowercase_ ( self ) -> Dict:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__lowercase )
    # Full denoising loop (epsilon prediction); checks sum/mean of the result.
    def lowercase_ ( self ) -> Optional[Any]:
        lowerCAmelCase_ : str = self.scheduler_classes[0]
        lowerCAmelCase_ : int = self.get_scheduler_config()
        lowerCAmelCase_ : int = scheduler_class(**__lowercase )
        scheduler.set_timesteps(self.num_inference_steps )
        lowerCAmelCase_ : List[Any] = torch.manual_seed(0 )
        lowerCAmelCase_ : List[str] = self.dummy_model()
        lowerCAmelCase_ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowerCAmelCase_ : List[Any] = sample.to(__lowercase )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCAmelCase_ : Dict = scheduler.scale_model_input(__lowercase , __lowercase )
            lowerCAmelCase_ : str = model(__lowercase , __lowercase )
            lowerCAmelCase_ : str = scheduler.step(__lowercase , __lowercase , __lowercase , generator=__lowercase )
            lowerCAmelCase_ : List[str] = output.prev_sample
        lowerCAmelCase_ : Any = torch.sum(torch.abs(__lowercase ) )
        lowerCAmelCase_ : Optional[int] = torch.mean(torch.abs(__lowercase ) )
        assert abs(result_sum.item() - 10.08_07 ) < 1e-2
        assert abs(result_mean.item() - 0.01_31 ) < 1e-3
    # Full denoising loop with v-prediction.
    def lowercase_ ( self ) -> Dict:
        lowerCAmelCase_ : str = self.scheduler_classes[0]
        lowerCAmelCase_ : Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
        lowerCAmelCase_ : Tuple = scheduler_class(**__lowercase )
        scheduler.set_timesteps(self.num_inference_steps )
        lowerCAmelCase_ : Tuple = torch.manual_seed(0 )
        lowerCAmelCase_ : int = self.dummy_model()
        lowerCAmelCase_ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowerCAmelCase_ : List[str] = sample.to(__lowercase )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCAmelCase_ : List[Any] = scheduler.scale_model_input(__lowercase , __lowercase )
            lowerCAmelCase_ : int = model(__lowercase , __lowercase )
            lowerCAmelCase_ : str = scheduler.step(__lowercase , __lowercase , __lowercase , generator=__lowercase )
            lowerCAmelCase_ : str = output.prev_sample
        lowerCAmelCase_ : int = torch.sum(torch.abs(__lowercase ) )
        lowerCAmelCase_ : Any = torch.mean(torch.abs(__lowercase ) )
        assert abs(result_sum.item() - 0.00_02 ) < 1e-2
        assert abs(result_mean.item() - 2.2_676e-06 ) < 1e-3
    # Full loop with timesteps placed on a specific device.
    def lowercase_ ( self ) -> Union[str, Any]:
        lowerCAmelCase_ : Dict = self.scheduler_classes[0]
        lowerCAmelCase_ : Union[str, Any] = self.get_scheduler_config()
        lowerCAmelCase_ : Tuple = scheduler_class(**__lowercase )
        scheduler.set_timesteps(self.num_inference_steps , device=__lowercase )
        lowerCAmelCase_ : Tuple = torch.manual_seed(0 )
        lowerCAmelCase_ : Tuple = self.dummy_model()
        lowerCAmelCase_ : int = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        lowerCAmelCase_ : Optional[Any] = sample.to(__lowercase )
        for t in scheduler.timesteps:
            lowerCAmelCase_ : Union[str, Any] = scheduler.scale_model_input(__lowercase , __lowercase )
            lowerCAmelCase_ : Any = model(__lowercase , __lowercase )
            lowerCAmelCase_ : Optional[int] = scheduler.step(__lowercase , __lowercase , __lowercase , generator=__lowercase )
            lowerCAmelCase_ : Optional[Any] = output.prev_sample
        lowerCAmelCase_ : Optional[int] = torch.sum(torch.abs(__lowercase ) )
        lowerCAmelCase_ : str = torch.mean(torch.abs(__lowercase ) )
        assert abs(result_sum.item() - 10.08_07 ) < 1e-2
        assert abs(result_mean.item() - 0.01_31 ) < 1e-3
    # Full loop using Karras sigma schedule.
    def lowercase_ ( self ) -> Tuple:
        lowerCAmelCase_ : Optional[int] = self.scheduler_classes[0]
        lowerCAmelCase_ : Dict = self.get_scheduler_config()
        lowerCAmelCase_ : int = scheduler_class(**__lowercase , use_karras_sigmas=__lowercase )
        scheduler.set_timesteps(self.num_inference_steps , device=__lowercase )
        lowerCAmelCase_ : Dict = torch.manual_seed(0 )
        lowerCAmelCase_ : int = self.dummy_model()
        lowerCAmelCase_ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        lowerCAmelCase_ : Union[str, Any] = sample.to(__lowercase )
        for t in scheduler.timesteps:
            lowerCAmelCase_ : List[str] = scheduler.scale_model_input(__lowercase , __lowercase )
            lowerCAmelCase_ : List[Any] = model(__lowercase , __lowercase )
            lowerCAmelCase_ : Optional[int] = scheduler.step(__lowercase , __lowercase , __lowercase , generator=__lowercase )
            lowerCAmelCase_ : Dict = output.prev_sample
        lowerCAmelCase_ : Optional[Any] = torch.sum(torch.abs(__lowercase ) )
        lowerCAmelCase_ : Union[str, Any] = torch.mean(torch.abs(__lowercase ) )
        assert abs(result_sum.item() - 1_24.52_29_94_99_51_17_19 ) < 1e-2
        assert abs(result_mean.item() - 0.1_62_13_93_26_33_39_99_63 ) < 1e-3
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    # The fast-tokenizer class below still references ``XLNetTokenizer`` as
    # its slow-tokenizer class, so the name must exist (bound to ``None``)
    # even without sentencepiece.  The previous revision bound a throwaway
    # name instead, leaving ``XLNetTokenizer`` undefined on this path.
    _UpperCAmelCase : Dict =None
    XLNetTokenizer = None
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
# The tokenizer class below logs through ``logger``; bind that name as well
# so ``logger.error`` does not raise ``NameError``.
logger = _UpperCAmelCase
# Tokenizer resource tables and segment-id constants for XLNet.
# Fix vs. the previous revision: every table was bound to one recycled mangled
# name while the tokenizer class below reads ``VOCAB_FILES_NAMES``,
# ``PRETRAINED_VOCAB_FILES_MAP`` and ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``
# — none of which were ever defined.
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    },
    """tokenizer_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
    },
}

# ``None`` means no fixed positional-embedding size limit for these checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = """▁"""

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class snake_case__( UpperCAmelCase__ ):
    '''Fast (tokenizers-backed) XLNet tokenizer with left padding.

    NOTE(review): machine-mangled — locals are bound to ``lowerCAmelCase_``
    but then unused (e.g. the wrapped mask token below is computed and
    discarded while ``mask_token=__lowercase`` forwards the raw argument),
    and attribute assignments never reach ``self``; restore the original
    identifiers before use.
    '''
    SCREAMING_SNAKE_CASE__ : int = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # XLNet pads on the left.
    SCREAMING_SNAKE_CASE__ : Any = """left"""
    SCREAMING_SNAKE_CASE__ : List[Any] = XLNetTokenizer
    def __init__( self , __lowercase=None , __lowercase=None , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<unk>" , __lowercase="<sep>" , __lowercase="<pad>" , __lowercase="<cls>" , __lowercase="<mask>" , __lowercase=["<eop>", "<eod>"] , **__lowercase , ) -> List[Any]:
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase_ : Any = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
        super().__init__(
            vocab_file=__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , **__lowercase , )
        lowerCAmelCase_ : List[Any] = 3
        lowerCAmelCase_ : Dict = do_lower_case
        lowerCAmelCase_ : Dict = remove_space
        lowerCAmelCase_ : List[str] = keep_accents
        lowerCAmelCase_ : List[str] = vocab_file
        lowerCAmelCase_ : str = False if not self.vocab_file else True
    # Build model inputs: XLNet appends <sep>/<cls> at the END of the sequence.
    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        lowerCAmelCase_ : Tuple = [self.sep_token_id]
        lowerCAmelCase_ : Any = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls
    # Token-type ids: segment 0/1 for the sentences, segment 2 for <cls>.
    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCAmelCase_ : List[Any] = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
    # Copy the sentencepiece model file into ``save_directory``.
    def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(__lowercase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase_ : str = os.path.join(
            __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
            copyfile(self.vocab_file , __lowercase )
        return (out_vocab_file,)
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
_UpperCAmelCase : Union[str, Any] =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , *__lowercase , **__lowercase ) -> None:
warnings.warn(
'''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PoolFormerImageProcessor instead.''' , __lowercase , )
super().__init__(*__lowercase , **__lowercase ) | 619 |
import math
import qiskit
def lowerCAmelCase ( input_1 = 1 , input_2 = 1 , carry_in = 1 )-> qiskit.result.counts.Counts:
    """Build and simulate a quantum full adder for two bits plus a carry.

    Each input may be 0, 1, or 2 (2 puts the qubit in superposition via a
    Hadamard gate).  Returns the measurement counts of the (sum, carry-out)
    qubits over 1000 shots.

    Fixes vs. the previous revision: the first two parameters shared one name
    (a ``SyntaxError``); the type check called ``isinstance`` with mangled
    arguments (it must reject ``str`` inputs); and the entry gates were
    applied to mangled names instead of qubit ``i``.

    Raises:
        TypeError: if any input is a string.
        ValueError: if any input is negative, non-integral, or greater than 2.
    """
    if (
        isinstance(input_1 , str )
        or isinstance(input_2 , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('''inputs must be integers.''' )
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''' )
    if (
        (math.floor(input_1 ) != input_1)
        or (math.floor(input_2 ) != input_2)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''' )
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''' )
    # build registers
    qr = qiskit.QuantumRegister(4 , '''qr''' )
    cr = qiskit.ClassicalRegister(2 , '''cr''' )
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr )  # measure the last two qbits
    backend = qiskit.Aer.get_backend('''aer_simulator''' )
    job = qiskit.execute(quantum_circuit , backend , shots=1_000 )
    return job.result().get_counts(quantum_circuit )


# The __main__ block below calls this by its original name.
quantum_full_adder = lowerCAmelCase
# Demo: simulate the adder with all inputs set to 1 and print the counts.
if __name__ == "__main__":
    print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : int =logging.get_logger(__name__)
# The conversion routine below logs through ``logger``; bind that name too.
logger = _UpperCAmelCase
_UpperCAmelCase : Tuple ={name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
# ...and reads the slow->fast class mapping through ``TOKENIZER_CLASSES``.
TOKENIZER_CLASSES = _UpperCAmelCase
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow tokenizer checkpoint(s) to the fast (tokenizers-backed) format.

    Args:
        tokenizer_name: Name of a class in ``TOKENIZER_CLASSES`` (without the ``Fast``
            suffix), or ``None`` to convert every known tokenizer.
        checkpoint_name: Single checkpoint to convert, or ``None`` for all canonical
            checkpoints of each tokenizer class.
        dump_path: Directory where the converted ``tokenizer.json`` files are written.
        force_download: Re-download checkpoints even if cached.

    Raises:
        ValueError: If ``tokenizer_name`` is given but unknown.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.""")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + '''Fast''')}

    logger.info(f"""Loading tokenizer classes: {tokenizer_names}""")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            # Convert every canonical checkpoint known for this tokenizer class.
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""")

        for checkpoint in checkpoint_names:
            logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split('''/''')
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    # Vocab file lives inside a sub-directory named after the checkpoint.
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(f"""=> File names {file_names}""")

            # Keep only the unified tokenizer.json files.
            for file_name in file_names:
                if not file_name.endswith('''tokenizer.json'''):
                    os.remove(file_name)
                    logger.info(f"""=> removing {file_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
    )
    parser.add_argument(
        """--tokenizer_name""",
        default=None,
        type=str,
        help=(
            f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
            """download and convert all the checkpoints from AWS."""
        ),
    )
    parser.add_argument(
        """--checkpoint_name""",
        default=None,
        type=str,
        help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
    )
    parser.add_argument(
        """--force_download""",
        action="""store_true""",
        help="""Re-download checkpoints.""",
    )
    args = parser.parse_args()
    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
import re
def indian_phone_validator(phone: str) -> bool:
    """Return True if ``phone`` is a valid Indian mobile number.

    Accepts an optional "+91" prefix (optionally followed by '-' or a space),
    an optional leading "0" or "91", then ten digits starting with 7, 8 or 9.
    """
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        # Pattern is fully anchored, so a hit means the whole string matched.
        return match.string == phone
    return False
if __name__ == "__main__":
    # Demo: validate a number given in "+91XXXXXXXXXX" form.
    print(indian_phone_validator("""+918827897895"""))
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
# Official dance-diffusion checkpoints: download URL plus the audio sample rate
# and sample size each model was trained with.
MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}
def alpha_sigma_to_t(alpha, sigma):
    """Convert (alpha, sigma) noise-schedule coefficients to a timestep in [0, 1].

    t = atan2(sigma, alpha) * 2 / pi, so (alpha=1, sigma=0) -> 0 and
    (alpha=0, sigma=1) -> 1.
    """
    # `torch.atana` does not exist — the original code uses torch.atan2.
    return torch.atan2(sigma, alpha) / math.pi * 2
def get_crash_schedule(t):
    """Map a linear timestep schedule ``t`` onto the "crash" schedule used by
    v-diffusion: sigma = sin(t*pi/2)^2, alpha = sqrt(1 - sigma^2)."""
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class Object(object):
    """Bare attribute container standing in for the original training-args namespace."""

    pass
class DiffusionUncond(nn.Module):
    """Module matching the original audio-diffusion checkpoint layout (diffusion net,
    its EMA copy, and a Sobol RNG) so state_dict keys line up when loading."""

    def __init__(self, global_args):
        super().__init__()
        # Attribute names must match the training script: the converter below reads
        # `diffusion_ema` and loads the checkpoint's "state_dict" into this module.
        self.diffusion = DiffusionAttnUnetaD(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    """Download the named checkpoint from MODELS_MAP into the CWD and return its path.

    NOTE(review): builds a shell command from the URL — URLs here are trusted module
    constants, but do not route untrusted input through this.
    """
    url = MODELS_MAP[model_name]['''url''']
    os.system(f"""wget {url} ./""")
    return f"""./{model_name}.ckpt"""
# The dicts below translate "layer number within a block" of the original model
# into the corresponding diffusers sub-module name; they are consumed by rename().
DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}
# ResConvBlock internals -> diffusers names.
RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}
# Attention internals; fused qkv expands into three separate projections.
ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    """Translate a ResConvBlock sub-module name to its diffusers equivalent."""
    if name.startswith('''skip'''):
        return name.replace('''skip''', RES_CONV_MAP['''skip'''])
    # name has to be of format main.{digit}
    if not name.startswith('''main.'''):
        raise ValueError(f"""ResConvBlock error with {name}""")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    """Translate an attention sub-module name via ATTN_MAP.

    Returns a single string, or a list of strings when one original parameter
    (the fused qkv projection) maps to several diffusers parameters.
    """
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"""Attn error with {name}""")
def rename(input_string, max_depth=13):
    """Map a parameter name from the original model to the diffusers UNet layout.

    Returns a string, or a list of strings when the name expands into several
    diffusers parameters (fused attention projections).
    """
    string = input_string
    if string.split('''.''')[0] == "timestep_embed":
        return string.replace('''timestep_embed''', '''time_proj''')

    # Count how deep into the U-Net the parameter lives.
    depth = 0
    if string.startswith('''net.3.'''):
        depth += 1
        string = string[6:]
    elif string.startswith('''net.'''):
        string = string[4:]

    while string.startswith('''main.7.'''):
        depth += 1
        string = string[7:]

    if string.startswith('''main.'''):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = '''mid_block'''
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"""down_blocks.{depth}"""
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"""up_blocks.{max_depth - depth - 1}"""
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"""up_blocks.{max_depth - 1}""" if int(layer_num) > 3 else '''down_blocks.0'''

    if not string_left.startswith('''.'''):
        raise ValueError(f"""Naming error with {input_string} and string_left: {string_left}.""")
    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        # May return a list (fused qkv -> query/key/value).
        string_left = convert_attn_naming(string_left)

    if not isinstance(string_left, list):
        new_string = prefix + '''.''' + new_layer + '''.''' + string_left
    else:
        new_string = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    """Rename all keys of the original state_dict to the diffusers layout.

    Keys ending in "kernel" (up/downsample layers without trainable weights) are
    dropped; fused attention weights are split via transform_conv_attns().
    """
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith('''kernel'''):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    """Store attention weights under their diffusers keys, converting Conv1d
    parameters to Linear shape and splitting fused qkv tensors in three.

    Args:
        new_state_dict: Destination dict (mutated and returned).
        new_k: List of one (out_proj) or three (q/k/v) destination keys.
        v: Original parameter tensor; 3-D tensors are Conv1d weights whose trailing
            kernel dim of 1 is dropped.
    """
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices: first dim stacks query/key/value contiguously.
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    """Convert an original dance-diffusion checkpoint into a diffusers pipeline and
    numerically verify that both models generate (near-)identical audio."""
    device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''')
    model_name = args.model_path.split('''/''')[-1].split('''.''')[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]['''sample_rate''']
    sample_size = MODELS_MAP[model_name]['''sample_size''']

    # Minimal stand-in for the original training-args namespace.
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNetaDModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)['''state_dict'''])
    # Use the EMA weights for conversion and comparison.
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, f"""Problem with {renamed_minus_diffusers}"""
    assert all(k.endswith('''kernel''') for k in list(diffusers_minus_renamed)), f"""Problem with {diffusers_minus_renamed}"""

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)
    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)
    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)
    print('''Diff sum''', diff_sum)
    print('''Diff max''', diff_max)
    assert diff_max < 1e-3, f"""Diff max: {diff_max} is too much :-/"""
    print(f"""Conversion for {model_name} successful!""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
    parser.add_argument(
        """--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
    )
    parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    args = parser.parse_args()
    main(args)
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
# Module-level logger, named per the transformers convention.
logger = logging.get_logger(__name__)
class snake_case__(BaseImageProcessor):
    r"""
    ConvNeXT-style image processor: optionally resizes (using a ``crop_pct``-based
    resize + center-crop below 384 px, direct warp at/above 384 px), rescales and
    normalizes images, and returns a ``BatchFeature`` of ``pixel_values``.
    """

    model_input_names = ["""pixel_values"""]

    def __init__(
        self,
        do_resize=True,
        size=None,
        crop_pct=None,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'''shortest_edge''': 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, crop_pct, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        """Resize ``image``. Below 384 px the shortest edge is first resized to
        ``shortest_edge / crop_pct`` (keeping aspect ratio), then center-cropped."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""")
        shortest_edge = size['''shortest_edge''']

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Rescale pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        crop_pct=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one image or a batch; every argument defaults to the value
        configured on the processor instance."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')

        # Parenthesized: the unparenthesized original parsed as
        # `(do_resize and size is None) or resample is None`.
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''')

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError('''crop_pct must be specified if size < 384.''')

        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class snake_case__(unittest.TestCase):
    """Processor tests for InstructBLIP (tokenizer + image processor + Q-Former tokenizer)."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''')
        qformer_tokenizer = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''')

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (converted from CHW uint8)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='''np''')
        input_processor = processor(images=image_input, return_tensors='''np''')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = '''lower newer'''

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor['''qformer_''' + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()), ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''], )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()), ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''], )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical checkpoint -> hosted config.json URL.
GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class snake_case__(PretrainedConfig):
    """Configuration class for GPT-NeoX-Japanese models.

    Stores model hyper-parameters (vocab/hidden sizes, attention layout, rotary
    embedding settings, dropouts) used to instantiate the model.
    """

    model_type = """gpt_neox_japanese"""

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        """--original_config_file""",
        default=None,
        type=str,
        help="""The YAML config file corresponding to the original architecture.""",
    )
    parser.add_argument(
        """--num_in_channels""",
        default=None,
        type=int,
        help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
    )
    parser.add_argument(
        """--scheduler_type""",
        default="""pndm""",
        type=str,
        help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
    )
    parser.add_argument(
        """--pipeline_type""",
        default=None,
        type=str,
        help=(
            """The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
            """. If `None` pipeline will be automatically inferred."""
        ),
    )
    parser.add_argument(
        """--image_size""",
        default=None,
        type=int,
        help=(
            """The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
            """ Base. Use 768 for Stable Diffusion v2."""
        ),
    )
    parser.add_argument(
        """--prediction_type""",
        default=None,
        type=str,
        help=(
            """The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
            """ Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
        ),
    )
    parser.add_argument(
        """--extract_ema""",
        action="""store_true""",
        help=(
            """Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
            """ or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
            """ higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
        ),
    )
    parser.add_argument(
        """--upcast_attention""",
        action="""store_true""",
        help=(
            """Whether the attention computation should always be upcasted. This is necessary when running stable"""
            """ diffusion 2.1."""
        ),
    )
    parser.add_argument(
        """--from_safetensors""",
        action="""store_true""",
        help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
    )
    parser.add_argument(
        """--to_safetensors""",
        action="""store_true""",
        help="""Whether to store pipeline in safetensors format or not.""",
    )
    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    parser.add_argument(
        """--stable_unclip""",
        type=str,
        default=None,
        required=False,
        help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
    )
    parser.add_argument(
        """--stable_unclip_prior""",
        type=str,
        default=None,
        required=False,
        help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
    )
    parser.add_argument(
        """--clip_stats_path""",
        type=str,
        help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
        required=False,
    )
    parser.add_argument(
        """--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
    )
    parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
    parser.add_argument(
        """--vae_path""",
        type=str,
        default=None,
        required=False,
        help="""Set to a path, hub id to an already converted vae to not convert it again.""",
    )
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        # `torch.floataa` does not exist; half precision is torch.float16.
        pipe.to(torch_dtype=torch.float16)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class snake_case__:
    '''simple docstring'''

    # Test helper that builds a tiny MaskFormer config plus random inputs
    # (mirrors transformers' "ModelTester" pattern).
    # NOTE(review): this file appears machine-renamed. Several signatures below
    # reuse the parameter name `__lowercase` (duplicate argument names are a
    # SyntaxError in Python), and consecutive `lowerCAmelCase_` assignments
    # shadow each other instead of binding the `self.*` attributes / locals
    # (`model`, `output`, `config`, ...) that later statements read. The
    # right-hand-side names show the intended bindings — restore before use.

    # Intended attribute layout (from the RHS names below): parent, batch_size,
    # is_training, use_auxiliary_loss, num_queries, num_channels, min_size,
    # max_size, num_labels, mask_feature_size.
    def __init__( self , __lowercase , __lowercase=2 , __lowercase=True , __lowercase=False , __lowercase=1_0 , __lowercase=3 , __lowercase=3_2 * 4 , __lowercase=3_2 * 6 , __lowercase=4 , __lowercase=3_2 , ) -> Union[str, Any]:
        lowerCAmelCase_ : str = parent
        lowerCAmelCase_ : Optional[Any] = batch_size
        lowerCAmelCase_ : List[Any] = is_training
        lowerCAmelCase_ : Optional[Any] = use_auxiliary_loss
        lowerCAmelCase_ : List[Any] = num_queries
        lowerCAmelCase_ : str = num_channels
        lowerCAmelCase_ : Dict = min_size
        lowerCAmelCase_ : List[str] = max_size
        lowerCAmelCase_ : Any = num_labels
        lowerCAmelCase_ : str = mask_feature_size

    # Draws random pixel values, a pixel mask, binary mask labels and class
    # labels on the target device and returns them with a config.
    # NOTE(review): `__lowercase` here presumably stood for `torch_device`.
    def lowercase_ ( self ) -> List[Any]:
        lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            __lowercase )
        lowerCAmelCase_ : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowercase )
        # random binary masks, one per (batch, label)
        lowerCAmelCase_ : Union[str, Any] = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowercase ) > 0.5
        ).float()
        lowerCAmelCase_ : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=__lowercase ) > 0.5).long()
        lowerCAmelCase_ : Dict = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    # Tiny Swin backbone + DETR decoder config, sized by the tester attributes.
    def lowercase_ ( self ) -> List[str]:
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )

    # Common-suite entry point: config plus the model's keyword inputs.
    def lowercase_ ( self ) -> Union[str, Any]:
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = self.prepare_config_and_inputs()
        lowerCAmelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
        return config, inputs_dict

    # Checks the three hidden-state tuples have the expected lengths.
    # NOTE(review): assertTrue(len(x), y) only checks the truthiness of len(x);
    # assertEqual was probably intended here.
    def lowercase_ ( self , __lowercase , __lowercase ) -> Any:
        lowerCAmelCase_ : Optional[int] = output.encoder_hidden_states
        lowerCAmelCase_ : List[Any] = output.pixel_decoder_hidden_states
        lowerCAmelCase_ : Optional[Any] = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowercase ) , config.decoder_config.decoder_layers )

    # Builds a MaskFormerModel and checks the output shapes / hidden states.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase=False ) -> int:
        with torch.no_grad():
            lowerCAmelCase_ : List[Any] = MaskFormerModel(config=__lowercase )
            model.to(__lowercase )
            model.eval()
            lowerCAmelCase_ : Optional[Any] = model(pixel_values=__lowercase , pixel_mask=__lowercase )
            lowerCAmelCase_ : Optional[int] = model(__lowercase , output_hidden_states=__lowercase )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(__lowercase , __lowercase )

    # Builds the segmentation head and checks logits shapes and the loss.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
        lowerCAmelCase_ : Any = MaskFormerForInstanceSegmentation(config=__lowercase )
        model.to(__lowercase )
        model.eval()

        def comm_check_on_output(__lowercase ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            lowerCAmelCase_ : int = model(pixel_values=__lowercase , pixel_mask=__lowercase )
            lowerCAmelCase_ : Any = model(__lowercase )
        comm_check_on_output(__lowercase )
        # with labels supplied the model must also return a scalar loss
        lowerCAmelCase_ : List[Any] = model(
            pixel_values=__lowercase , pixel_mask=__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
        comm_check_on_output(__lowercase )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    '''simple docstring'''

    # Common-suite test case for MaskFormer (ModelTesterMixin-style layout).
    # NOTE(review): machine-renamed file — `__lowercase` below stands in for
    # several different originals (config class, device, booleans, ...), so
    # many calls reference an unbound name; restore before running.
    SCREAMING_SNAKE_CASE__ : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE__ : Tuple = (
        {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : Dict = False
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : List[str] = False

    # setUp: build the model tester and the config tester.
    def lowercase_ ( self ) -> List[Any]:
        lowerCAmelCase_ : Any = MaskFormerModelTester(self )
        lowerCAmelCase_ : str = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )

    def lowercase_ ( self ) -> Any:
        self.config_tester.run_common_tests()

    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ , lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )

    def lowercase_ ( self ) -> Optional[Any]:
        lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowercase )

    @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
    def lowercase_ ( self ) -> str:
        pass

    @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass

    @unittest.skip(reason='''MaskFormer is not a generative model''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass

    @unittest.skip(reason='''MaskFormer does not use token embeddings''' )
    def lowercase_ ( self ) -> Union[str, Any]:
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def lowercase_ ( self ) -> Dict:
        pass

    # The forward signature's first argument must be `pixel_values`.
    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : Tuple = model_class(__lowercase )
            lowerCAmelCase_ : Dict = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase_ : str = [*signature.parameters.keys()]
            lowerCAmelCase_ : Tuple = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __lowercase )

    @slow
    def lowercase_ ( self ) -> Optional[int]:
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            lowerCAmelCase_ : str = MaskFormerModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )

    # With target labels supplied the segmentation head must return a loss.
    def lowercase_ ( self ) -> List[Any]:
        lowerCAmelCase_ : Tuple = (self.model_tester.min_size,) * 2
        lowerCAmelCase_ : List[Any] = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=__lowercase ),
            '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowercase ),
            '''class_labels''': torch.zeros(2 , 1_0 , device=__lowercase ).long(),
        }
        lowerCAmelCase_ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowercase )
        lowerCAmelCase_ : Dict = model(**__lowercase )
        self.assertTrue(outputs.loss is not None )

    def lowercase_ ( self ) -> Dict:
        lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )

    # Requesting attentions must populate `outputs.attentions`.
    def lowercase_ ( self ) -> int:
        lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : List[str] = model_class(__lowercase ).to(__lowercase )
            lowerCAmelCase_ : int = model(**__lowercase , output_attentions=__lowercase )
            self.assertTrue(outputs.attentions is not None )

    # Training smoke test: the loss must backpropagate without error.
    def lowercase_ ( self ) -> List[str]:
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase_ : int = self.all_model_classes[1]
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ : Optional[Any] = model_class(__lowercase )
        model.to(__lowercase )
        model.train()
        lowerCAmelCase_ : Optional[Any] = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ).loss
        loss.backward()

    # Gradient-retention test: intermediate hidden states / attentions must
    # hold gradients after backward.
    def lowercase_ ( self ) -> Optional[int]:
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase_ : Any = self.all_model_classes[1]
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ : Tuple = True
        lowerCAmelCase_ : Tuple = True
        lowerCAmelCase_ : Any = model_class(__lowercase )
        model.to(__lowercase )
        model.train()
        lowerCAmelCase_ : Any = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
        lowerCAmelCase_ : Union[str, Any] = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        lowerCAmelCase_ : str = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        lowerCAmelCase_ : str = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        lowerCAmelCase_ : Union[str, Any] = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=__lowercase )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
# Absolute tolerance used by the integration tests below when comparing
# output slices against hard-coded references.
_UpperCAmelCase : Dict = 1E-4


def prepare_img() -> Any:
    """Load the COCO cats fixture image used by the slow integration tests.

    Fix: the tests below call ``prepare_img()``, but the original bound this
    function to the mangled name ``lowerCAmelCase``, so the call raised
    NameError.
    """
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )


lowerCAmelCase = prepare_img  # backward-compatible alias for the old name
@require_vision
@slow
class snake_case__( unittest.TestCase ):
    '''simple docstring'''

    # Slow integration tests that run real checkpoints and compare 3x3 output
    # slices against hard-coded references.
    # NOTE(review): machine-renamed — `__lowercase` stands in for the device /
    # the processed inputs / the tolerance constant, so these won't run as-is.
    @cached_property
    def lowercase_ ( self ) -> Union[str, Any]:
        # Default image processor, or None when vision deps are missing.
        return (
            MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            if is_vision_available()
            else None
        )

    # Backbone-only checkpoint: check encoder / pixel-decoder / transformer-
    # decoder last hidden states against reference slices.
    def lowercase_ ( self ) -> Any:
        lowerCAmelCase_ : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowercase )
        lowerCAmelCase_ : Dict = self.default_image_processor
        lowerCAmelCase_ : int = prepare_img()
        lowerCAmelCase_ : Any = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Any = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : List[str] = model(**__lowercase )
        lowerCAmelCase_ : Union[str, Any] = torch.tensor(
            [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        lowerCAmelCase_ : List[Any] = torch.tensor(
            [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        lowerCAmelCase_ : int = torch.tensor(
            [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) )

    # Instance-segmentation head (swin-small, COCO): check mask and class
    # query logits shapes plus reference slices.
    def lowercase_ ( self ) -> Dict:
        lowerCAmelCase_ : Optional[Any] = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : Tuple = self.default_image_processor
        lowerCAmelCase_ : Optional[Any] = prepare_img()
        lowerCAmelCase_ : int = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Tuple = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : Dict = model(**__lowercase )
        # masks_queries_logits
        lowerCAmelCase_ : Optional[int] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowerCAmelCase_ : Tuple = [
            [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
            [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
            [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
        ]
        lowerCAmelCase_ : int = torch.tensor(__lowercase ).to(__lowercase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        # class_queries_logits
        lowerCAmelCase_ : List[Any] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCAmelCase_ : Dict = torch.tensor(
            [
                [1.6_512e00, -5.2_572e00, -3.3_519e00],
                [3.6_169e-02, -5.9_025e00, -2.9_313e00],
                [1.0_766e-04, -7.7_630e00, -5.1_263e00],
            ] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )

    # Same head checks for the resnet101 COCO-stuff checkpoint.
    def lowercase_ ( self ) -> Optional[Any]:
        lowerCAmelCase_ : str = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : int = self.default_image_processor
        lowerCAmelCase_ : Optional[Any] = prepare_img()
        lowerCAmelCase_ : Dict = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : str = model(**__lowercase )
        # masks_queries_logits
        lowerCAmelCase_ : List[str] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowerCAmelCase_ : Any = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
        lowerCAmelCase_ : str = torch.tensor(__lowercase ).to(__lowercase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        # class_queries_logits
        lowerCAmelCase_ : Optional[int] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCAmelCase_ : int = torch.tensor(
            [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )

    # Batched forward with segmentation maps: must produce a loss.
    # NOTE(review): `np.floataa` does not exist — presumably `np.float32`
    # before the file was machine-renamed.
    def lowercase_ ( self ) -> Optional[Any]:
        lowerCAmelCase_ : Dict = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : str = self.default_image_processor
        lowerCAmelCase_ : Union[str, Any] = image_processor(
            [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
        lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].to(__lowercase )
        lowerCAmelCase_ : int = [el.to(__lowercase ) for el in inputs['''mask_labels''']]
        lowerCAmelCase_ : Optional[Any] = [el.to(__lowercase ) for el in inputs['''class_labels''']]
        with torch.no_grad():
            lowerCAmelCase_ : str = model(**__lowercase )
        self.assertTrue(outputs.loss is not None )
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Return True when ``positive_integer`` is a "perfect" partition count.

    A candidate k is perfect when log2(sqrt(4*k + 1) / 2 + 1 / 2) is an
    integer.

    Fix: the original bound this helper to the mangled name ``lowerCAmelCase``
    (immediately shadowed by the second definition) while ``solution`` calls
    it as ``check_partition_perfect`` — a NameError at runtime.
    """
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12_345) -> int:
    """Return the first partition count whose perfect/total ratio drops
    strictly below ``max_proportion``.

    Candidates are the integer values of (n**2 - 1) / 4 for n = 3, 4, 5, ...
    """
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


lowerCAmelCase = solution  # backward-compatible alias for the old mangled name

if __name__ == "__main__":
    print(f"""{solution() = }""")
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase__ )
class snake_case__( UpperCAmelCase__ ):
    '''simple docstring'''

    # Task template describing an automatic-speech-recognition dataset: an
    # Audio input column mapped to a string transcription column (mirrors the
    # `datasets` TaskTemplate pattern).
    # NOTE(review): machine-renamed — the `lowerCAmelCase_` assignments in the
    # alignment method below shadow each other instead of binding the
    # `task_template` / `input_schema` names read afterwards; the RHS
    # expressions show the intended flow (deepcopy self, patch the dataset's
    # audio feature into a copy of the input schema, attach it to the copy).
    SCREAMING_SNAKE_CASE__ : str = field(default="""automatic-speech-recognition""", metadata={"""include_in_asdict_even_if_is_default""": True} )
    SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""audio""": Audio()} )
    SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""transcription""": Value("""string""" )} )
    SCREAMING_SNAKE_CASE__ : str = "audio"
    SCREAMING_SNAKE_CASE__ : str = "transcription"

    # Validates that `features` holds an Audio column and returns a copy of
    # this template whose input schema uses that column's exact feature.
    def lowercase_ ( self , __lowercase ) -> int:
        if self.audio_column not in features:
            raise ValueError(f"""Column {self.audio_column} is not present in features.""" )
        if not isinstance(features[self.audio_column] , __lowercase ):
            raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" )
        lowerCAmelCase_ : List[str] = copy.deepcopy(self )
        lowerCAmelCase_ : Optional[Any] = self.input_schema.copy()
        lowerCAmelCase_ : Optional[Any] = features[self.audio_column]
        lowerCAmelCase_ : List[str] = input_schema
        return task_template

    @property
    def lowercase_ ( self ) -> Dict[str, str]:
        # Mapping from dataset column name to canonical task column name.
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
def solution(n: int = 100) -> int:
    """Project Euler 29: count distinct values of a**b for 2 <= a, b <= n.

    Fix: the ``__main__`` guard calls ``solution``, but the original bound
    the function to the mangled name ``lowerCAmelCase`` (NameError); also
    removed an unused local.
    """
    upper = n + 1  # exclusive upper bound for both base and exponent
    collect_powers = {base**exp for base in range(2, upper) for exp in range(2, upper)}
    return len(collect_powers)


lowerCAmelCase = solution  # backward-compatible alias for the old mangled name

if __name__ == "__main__":
    print("""Number of terms """, solution(int(str(input()).strip())))
# NOTE(review): machine-renamed module — every constant below is rebound to
# the same name `_UpperCAmelCase`, so only the last assignment survives at
# import time. In the original (a diffusers-style test `pipeline_params`
# helper) each frozenset had its own name (TEXT_TO_IMAGE_PARAMS,
# TEXT_TO_IMAGE_BATCH_PARAMS, IMAGE_VARIATION_PARAMS, ... — TODO confirm
# against upstream before restoring the names).

# text-to-image: full call-signature parameter set
_UpperCAmelCase : int =frozenset(
    [
        """prompt""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
# text-to-image: parameters batched together with the prompt
_UpperCAmelCase : List[Any] =frozenset(["""prompt""", """negative_prompt"""])
_UpperCAmelCase : Dict =frozenset([])
# unconditional image variation
_UpperCAmelCase : int =frozenset(["""image"""])
_UpperCAmelCase : Tuple =frozenset(
    [
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : int =frozenset(["""image"""])
# text-guided image-to-image
_UpperCAmelCase : str =frozenset(
    [
        """prompt""",
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : int =frozenset(["""prompt""", """image""", """negative_prompt"""])
_UpperCAmelCase : Optional[int] =frozenset(
    [
        # Text guided image variation with an image mask
        """prompt""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : Optional[int] =frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
_UpperCAmelCase : Optional[Any] =frozenset(
    [
        # image variation with an image mask
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Optional[Any] =frozenset(["""image""", """mask_image"""])
# example-guided inpainting (paint-by-example style)
_UpperCAmelCase : Union[str, Any] =frozenset(
    [
        """example_image""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Tuple =frozenset(["""example_image""", """image""", """mask_image"""])
# class-conditional generation
_UpperCAmelCase : Any =frozenset(["""class_labels"""])
_UpperCAmelCase : List[Any] =frozenset(["""class_labels"""])
# unconditional generation
_UpperCAmelCase : int =frozenset(["""batch_size"""])
_UpperCAmelCase : str =frozenset([])
_UpperCAmelCase : str =frozenset(["""batch_size"""])
_UpperCAmelCase : Optional[Any] =frozenset([])
# text-to-audio
_UpperCAmelCase : Tuple =frozenset(
    [
        """prompt""",
        """audio_length_in_s""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : Tuple =frozenset(["""prompt""", """negative_prompt"""])
# token-conditioned audio generation
_UpperCAmelCase : List[str] =frozenset(["""input_tokens"""])
_UpperCAmelCase : Optional[Any] =frozenset(["""input_tokens"""])
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import layout for the LLaMA model (transformers convention): the
# structure dict maps submodule name -> public names, filled in conditionally
# based on which optional backends are installed.
#
# Fix: the original bound the dict (and each conditional addition) to the
# throwaway name `_UpperCAmelCase`, while the _LazyModule call at the bottom
# reads `_import_structure` — a NameError at import time, and the
# conditionally-available names were silently discarded. Restored the dict
# name and keyed the conditional additions into it.
_import_structure = {
    """configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}

# Slow tokenizer requires sentencepiece.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_llama"""] = ["""LlamaTokenizer"""]

# Fast tokenizer requires the tokenizers library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_llama_fast"""] = ["""LlamaTokenizerFast"""]

# Modeling code requires torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_llama"""] = [
        """LlamaForCausalLM""",
        """LlamaModel""",
        """LlamaPreTrainedModel""",
        """LlamaForSequenceClassification""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    _UpperCAmelCase : List[Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
    # Fix: register the lazy module so attribute access is actually deferred
    # (this is why `sys` is imported); keeps the old `_UpperCAmelCase` binding.
    sys.modules[__name__] = _UpperCAmelCase
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 14: starting number below ``limit`` producing the
    longest Collatz chain.

    Chain lengths of visited starting numbers are memoised in ``counters``
    so each tail is only walked once.

    Fix: the ``__main__`` guard calls ``solution``, but the original bound
    the function to the mangled name ``lowerCAmelCase`` (NameError).
    """
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}  # number -> chain length (memo)
    for start in range(2, limit):
        counter = 0
        number = start
        while True:
            if number in counters:
                # reached a memoised number: add its known chain length
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if start not in counters:
            counters[start] = counter
        if counter > pre_counter:
            largest_number = start
            pre_counter = counter
    return largest_number


lowerCAmelCase = solution  # backward-compatible alias for the old mangled name

if __name__ == "__main__":
    print(solution(int(input().strip())))
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
# Restored the canonical constant names: the metric class below is decorated
# with `add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)` and passes
# `citation=_CITATION`, but the original bound all three strings to the same
# throwaway name `_UpperCAmelCase` — a NameError at import time.

# BibTeX citation for the IndicGLUE benchmark.
_CITATION = """\
@inproceedings{kakwani2020indicnlpsuite,
    title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
    author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
    year={2020},
    booktitle={Findings of EMNLP},
}
"""

# Short description of the benchmark, shown in the metric info.
_DESCRIPTION = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""

# Usage / argument documentation attached to `compute`.
_KWARGS_DESCRIPTION = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
    predictions: list of predictions to score (as int64),
        except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
    references: list of ground truth labels corresponding to the predictions (as int64),
        except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
    \"accuracy\": Accuracy
    \"f1\": F1 score
    \"precision\": Precision@10
Examples:

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli')  # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'precision@10': 1.0}
"""
def simple_accuracy(preds, labels):
    """Return the fraction of predictions equal to the reference labels.

    Fix: restored the name and parameters — the original definition repeated
    the mangled parameter name (a SyntaxError) and was bound to
    ``lowerCAmelCase`` while callers below use ``simple_accuracy``.
    """
    return float((preds == labels).mean() )
def acc_and_fa(preds, labels):
    """Return both accuracy and F1 score as a dict.

    Fix: restored the name and parameters — the original definition repeated
    the mangled parameter name (a SyntaxError) and was bound to
    ``lowerCAmelCase`` while the metric class calls ``acc_and_fa``.
    """
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def precision_at_aa(en_sentvecs, in_sentvecs):
    """Precision@10 for cross-lingual sentence retrieval.

    Sentence i in ``en_sentvecs`` counts as a match when index i appears in
    the 10 nearest (cosine) neighbours among ``in_sentvecs``.

    Fix: restored the name and parameters — the original definition repeated
    the mangled parameter name (a SyntaxError), shadowed its locals, and was
    bound to ``lowerCAmelCase`` while the metric class calls
    ``precision_at_aa``.
    """
    en_sentvecs = np.array(en_sentvecs )
    in_sentvecs = np.array(in_sentvecs )
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs , axis=0 )
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs , axis=0 )
    sim = cdist(en_sentvecs , in_sentvecs , '''cosine''' )
    actual = np.array(range(n ) )
    preds = sim.argsort(axis=1 )[:, :10]
    matches = np.any(preds == actual[:, None] , axis=1 )
    return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class snake_case__( datasets.Metric ):
    '''simple docstring'''

    # IndicGLUE metric. `_info` declares the feature schema (float32 vectors
    # for the 'cvit-mkb-clsr' retrieval task, int64 labels otherwise);
    # `_compute` dispatches on the configuration name.
    def lowercase_ ( self ) -> Tuple:
        # Reject unknown configuration names up front.
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
                '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
                '''"wiki-ner"]''' )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''int64''' )
                    if self.config_name != '''cvit-mkb-clsr'''
                    else datasets.Sequence(datasets.Value('''float32''' ) ),
                    '''references''': datasets.Value('''int64''' )
                    if self.config_name != '''cvit-mkb-clsr'''
                    else datasets.Sequence(datasets.Value('''float32''' ) ),
                } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )

    # Dispatch to the right scorer for the configured task.
    def lowercase_ ( self , __lowercase , __lowercase ) -> Union[str, Any]:
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(__lowercase , __lowercase )}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(__lowercase , __lowercase )
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(__lowercase , __lowercase )}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
                '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
                '''"wiki-ner"]''' )
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: the config class below logs via the name `logger`, which the original
# never bound (the get_logger result was assigned only to `_UpperCAmelCase`).
# Bind `logger` and keep the old module-level alias.
logger = logging.get_logger(__name__)
_UpperCAmelCase : str = logger
class snake_case__( UpperCAmelCase__ ):
    '''simple docstring'''

    # Composite configuration holding an encoder and a decoder sub-config
    # (mirrors transformers' EncoderDecoderConfig).
    # NOTE(review): machine-renamed — the `lowerCAmelCase_` assignments below
    # shadow each other instead of binding the names read afterwards
    # (`encoder_config`, `decoder_config`, `output`), and the classmethod
    # repeats the parameter name `__lowercase` (a SyntaxError). The RHS names
    # show the intended bindings — restore before use.
    SCREAMING_SNAKE_CASE__ : Optional[int] = """encoder-decoder"""
    SCREAMING_SNAKE_CASE__ : str = True

    # Pops the 'encoder'/'decoder' dicts from kwargs and rebuilds the two
    # sub-configs via AutoConfig.
    def __init__( self , **__lowercase ) -> Union[str, Any]:
        super().__init__(**__lowercase )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        lowerCAmelCase_ : str = kwargs.pop('''encoder''' )
        lowerCAmelCase_ : int = encoder_config.pop('''model_type''' )
        lowerCAmelCase_ : Optional[Any] = kwargs.pop('''decoder''' )
        lowerCAmelCase_ : Optional[Any] = decoder_config.pop('''model_type''' )

        # imported here to avoid a circular import at module load time
        from ..auto.configuration_auto import AutoConfig

        lowerCAmelCase_ : Union[str, Any] = AutoConfig.for_model(__lowercase , **__lowercase )
        lowerCAmelCase_ : List[str] = AutoConfig.for_model(__lowercase , **__lowercase )
        lowerCAmelCase_ : Any = True

    # Alternate constructor from two already-built sub-configs; marks the
    # decoder as a decoder with cross-attention first.
    @classmethod
    def lowercase_ ( cls , __lowercase , __lowercase , **__lowercase ) -> PretrainedConfig:
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        lowerCAmelCase_ : int = True
        lowerCAmelCase_ : List[Any] = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowercase )

    # Serializes this config (and both sub-configs) to a plain dict.
    def lowercase_ ( self ) -> Any:
        lowerCAmelCase_ : Optional[Any] = copy.deepcopy(self.__dict__ )
        lowerCAmelCase_ : List[str] = self.encoder.to_dict()
        lowerCAmelCase_ : Dict = self.decoder.to_dict()
        lowerCAmelCase_ : Optional[Any] = self.__class__.model_type
        return output
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
# Emit INFO-level progress messages while converting checkpoints.
logging.set_verbosity_info()
# Module logger — referenced as ``logger`` throughout this script.
logger = logging.get_logger(__name__)
# Maps fairseq wav2vec2 parameter-name fragments to their HF counterparts.
# "*" is a placeholder for the encoder layer index, filled in at load time.
MAPPING = {
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """quantizer.weight_proj""": """quantizer.weight_proj""",
    """quantizer.vars""": """quantizer.codevectors""",
    """project_q""": """project_q""",
    """final_proj""": """project_hid""",
    """mask_emb""": """masked_spec_embed""",
    """w2v_encoder.proj""": """lm_head""",
}
# Parameters that live at the top level of the HF model (not nested under the
# encoder). NOTE(review): list name reconstructed from the reference script —
# confirm; it is not referenced in this chunk.
TOP_LEVEL_KEYS = [
    """lm_head""",
    """quantizer.weight_proj""",
    """quantizer.codevectors""",
    """project_q""",
    """project_hid""",
]
def set_recursively ( hf_pointer , key , value , full_name , weight_type )-> Optional[Any]:
    """Assign ``value`` into the HF module addressed by dotted path ``key``.

    Name and parameters restored from the call site (duplicate obfuscated
    parameter names were a SyntaxError, and the assignments never reached the
    target module).

    Args:
        hf_pointer: root HF module to descend into.
        key: dot-separated attribute path.
        value: tensor to copy in.
        full_name: original fairseq parameter name (for log/assert messages).
        weight_type: one of "weight"/"weight_g"/"weight_v"/"bias" or None.
    """
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        # No sub-attribute requested: the pointer itself is the parameter.
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights_wavaveca ( fairseq_model , hf_model )-> Union[str, Any]:
    """Copy fairseq wav2vec2 encoder weights into ``hf_model``.

    Returns the fairseq encoder->decoder projection module (``fairseq_model.proj``)
    when present, else None. Name restored from the call site; the obfuscated
    body never bound ``is_used``/``proj_weight``/``unused_weights``.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        elif name.split('''.''' )[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The layer index sits just before the matched fragment.
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
    return proj_weight
def load_conv_layer ( full_name , value , feature_extractor , unused_weights , use_group_norm )-> Dict:
    """Copy one fairseq conv feature-extractor tensor into the HF feature extractor.

    ``full_name`` looks like ``...conv_layers.<layer_id>.<type_id>...``; type_id 0
    is the conv weight/bias, type_id 2 a layer norm. Unmatched names are
    collected in ``unused_weights``. Name/parameters restored from the call site.
    """
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
def lowerCAmelCase ( emb )-> Any:
    """Create a Linear layer that shares its weight matrix with ``emb``.

    The obfuscated original returned the undefined name ``lin_layer`` and never
    copied the weights; restored here. NOTE(review): ``bias=False`` is assumed
    from the reference conversion script (the flag value was obfuscated).
    """
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict ( dict_path )-> int:
    """Build a token->id vocabulary from a fairseq dict file.

    Each line of ``dict_path`` is "<token> <count>"; the four special tokens get
    the first ids and real tokens start at 4. Name restored from the call site;
    the obfuscated body never bound ``words``/``num_words``/``vocab_dict``.
    """
    with open(dict_path , '''r''' , encoding='''utf-8''' ) as f:
        lines = f.readlines()
    words = [line.split(''' ''' )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
        '''<s>''': 0,
        '''<pad>''': 1,
        '''</s>''': 2,
        '''<unk>''': 3,
    }
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint ( checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , )-> Tuple:
    """Convert a fairseq wav2vec2 + speech2text2 checkpoint into an HF
    SpeechEncoderDecoderModel and save model/tokenizer/feature extractor.

    Name and parameter names restored from the ``__main__`` call site. The
    obfuscated body bound every intermediate to one local; attribute targets
    below are reconstructed from the reference script — NOTE(review): confirm
    against an actual conversion run.
    """
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path )
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path , vocab_size=vocab_size , decoder_layers=num_decoder_layers , do_stable_layer_norm=True )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    projection_layer = recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    hf_decoder = SpeechaTextaForCausalLM(decoder_config )
    missing_keys , unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    # set output linear layer
    unexpected_keys.remove('''embed_out''' )
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight )
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias )
    vocab_dict = create_vocab_dict(dict_path )
    with open(os.path.join(pytorch_dump_folder_path , '''vocab.json''' ) , '''w''' ) as fp:
        json.dump(vocab_dict , fp )
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path , '''vocab.json''' ) )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config['''pad_token_id'''] = tokenizer.pad_token_id
    config['''bos_token_id'''] = tokenizer.bos_token_id
    config['''eos_token_id'''] = tokenizer.eos_token_id
    config['''tokenizer_class'''] = '''speech_to_text_2'''
    config['''feature_extractor_type'''] = '''wav2vec2'''
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry point for the fairseq -> Hugging Face conversion script.
    # NOTE(review): the obfuscated bindings below never define the ``parser``
    # and ``args`` names that the following lines read — presumably those were
    # the intended names; confirm before running.
    _UpperCAmelCase : List[Any] =argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument(
        """--encoder_config_path""",
        default="""facebook/wav2vec2-large-lv60""",
        type=str,
        help="""Path to hf encoder wav2vec2 checkpoint config""",
    )
    parser.add_argument(
        """--decoder_config_path""",
        default="""facebook/s2t-small-mustc-en-fr-st""",
        type=str,
        help="""Path to hf decoder s2t checkpoint config""",
    )
    parser.add_argument("""--vocab_size""", default=1_0224, type=int, help="""Vocab size of decoder""")
    parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
    _UpperCAmelCase : Tuple =parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
from __future__ import annotations
from math import pi
def lowerCAmelCase ( inductance , frequency , reactance )-> dict[str, float]:
    """Solve X_L = 2·π·f·L for whichever of the three quantities is given as 0.

    Exactly one argument must be 0; it is computed from the other two. The
    original signature repeated one obfuscated parameter name (a SyntaxError);
    the names used by the body are restored.

    Args:
        inductance: L in henries (0 to solve for it).
        frequency: f in hertz (0 to solve for it).
        reactance: X_L in ohms (0 to solve for it).

    Returns:
        A one-entry dict naming the solved quantity.

    Raises:
        ValueError: if zero counts differ from one or any value is negative.
    """
    if (inductance, frequency, reactance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if inductance < 0:
        raise ValueError('''Inductance cannot be negative''' )
    if frequency < 0:
        raise ValueError('''Frequency cannot be negative''' )
    if reactance < 0:
        raise ValueError('''Inductive reactance cannot be negative''' )
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        # Unreachable: the count check above guarantees exactly one zero.
        raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
    # Run the module's doctests when this file is executed as a script.
    import doctest
    doctest.testmod()
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    """Fast unit tests for StableDiffusionSAGPipeline using tiny components."""

    # NOTE(review): the obfuscation collapsed the mixin class attributes onto a
    # single name; values preserved as-is pending confirmation of the canonical
    # attribute names expected by the tester mixins.
    SCREAMING_SNAKE_CASE__ : Any = StableDiffusionSAGPipeline
    SCREAMING_SNAKE_CASE__ : Optional[Any] = TEXT_TO_IMAGE_PARAMS
    SCREAMING_SNAKE_CASE__ : str = TEXT_TO_IMAGE_BATCH_PARAMS
    SCREAMING_SNAKE_CASE__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
    SCREAMING_SNAKE_CASE__ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
    SCREAMING_SNAKE_CASE__ : Optional[int] = False

    def get_dummy_components( self ) -> Union[str, Any]:
        # NOTE(review): method names restored to the tester-mixin hook names;
        # the obfuscated duplicates shadowed each other. Locals restored from
        # the references in the components dict below.
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
        # NOTE(review): clip_sample/set_alpha_to_one flag values were obfuscated;
        # False matches the reference DDIM test configuration — confirm.
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def get_dummy_inputs( self , device , seed=0 ) -> Tuple:
        """Deterministic call kwargs for the pipeline under test."""
        if str(device ).startswith('''mps''' ):
            # MPS does not support device-bound Generator objects.
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''.''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 1.0,
            '''sag_scale''': 1.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def test_inference_batch_single_identical( self ) -> Union[str, Any]:
        # Name restored from the super() call so this override shadows the mixin hook.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class snake_case__( unittest.TestCase ):
    """Slow GPU integration tests for StableDiffusionSAGPipeline."""

    def tearDown( self ) -> Any:
        # clean up the VRAM after each test
        # (name restored from the ``super().tearDown()`` call so unittest runs it;
        # the obfuscated duplicates shadowed each other)
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1( self ) -> int:
        # NOTE(review): obfuscated call args restored as ``torch_device`` /
        # ``disable=None`` per the standard diffusers test pattern — confirm.
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = '''.'''
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type='''np''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2

    def test_stable_diffusion_2( self ) -> Dict:
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = '''.'''
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type='''np''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2

    def test_stable_diffusion_768( self ) -> str:
        # Non-square output: only the overall shape is checked.
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = '''.'''
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , width=7_6_8 , height=5_1_2 , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type='''np''' , )
        image = output.images
        assert image.shape == (1, 5_1_2, 7_6_8, 3)
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
# Module-level logger.
# NOTE(review): the obfuscated name ``_UpperCAmelCase`` presumably stood for
# ``logger``; it is not referenced within this chunk — confirm.
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
class SchedulerType( UpperCAmelCase__ ):
    """Names of the supported learning-rate schedules (string-valued enum).

    Class and member names restored from the ``SchedulerType.<MEMBER>`` lookups
    in ``TYPE_TO_SCHEDULER_FUNCTION`` and ``get_scheduler`` below; the
    obfuscated version bound every member to one attribute name.
    """

    LINEAR = """linear"""
    COSINE = """cosine"""
    COSINE_WITH_RESTARTS = """cosine_with_restarts"""
    POLYNOMIAL = """polynomial"""
    CONSTANT = """constant"""
    CONSTANT_WITH_WARMUP = """constant_with_warmup"""
    PIECEWISE_CONSTANT = """piecewise_constant"""
def get_constant_schedule ( optimizer , last_epoch = -1 )-> Tuple:
    """Schedule keeping the learning rate fixed at the optimizer's initial lr.

    Name restored from the ``TYPE_TO_SCHEDULER_FUNCTION`` reference; the
    original repeated one obfuscated parameter name (a SyntaxError).
    """
    return LambdaLR(optimizer , lambda _ : 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup ( optimizer , num_warmup_steps , last_epoch = -1 )-> List[Any]:
    """Constant lr after a linear warmup from 0 over ``num_warmup_steps`` steps."""

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            # Linearly ramp the multiplier from 0 -> 1 during warmup.
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0

    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule ( optimizer , step_rules , last_epoch = -1 )-> int:
    """Piecewise-constant lr multipliers from a rule string.

    ``step_rules`` like ``"1:10,0.1:20,0.005"``: multiplier 1 below step 10,
    0.1 below step 20, and the trailing value (0.005) afterwards.
    NOTE(review): the "value:steps" field order was obfuscated and is restored
    from the reference implementation — confirm against callers.
    """
    rules_dict = {}
    rule_list = step_rules.split(''',''' )
    for rule_str in rule_list[:-1]:
        value_str , steps_str = rule_str.split(''':''' )
        steps = int(steps_str )
        value = float(value_str )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )

    def create_rules_function(rules_dict , last_lr_multiple ):
        # Closure factory keeps the captured dict/value explicit.
        def rule_func(steps ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i , sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def get_linear_schedule_with_warmup ( optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 )-> Optional[int]:
    """Linear warmup to the initial lr, then linear decay to 0 at ``num_training_steps``."""

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        # Decay linearly; clamp at 0 once training steps are exhausted.
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup ( optimizer , num_warmup_steps , num_training_steps , num_cycles = 0.5 , last_epoch = -1 )-> List[Any]:
    """Linear warmup, then cosine decay following ``num_cycles`` half-waves."""

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        # Fraction of the post-warmup schedule completed, in [0, 1].
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup ( optimizer , num_warmup_steps , num_training_steps , num_cycles = 1 , last_epoch = -1 )-> Dict:
    """Linear warmup, then cosine decay with ``num_cycles`` hard restarts."""

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        # The modulo re-starts the cosine wave at full amplitude each cycle.
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup ( optimizer , num_warmup_steps , num_training_steps , lr_end=1e-7 , power=1.0 , last_epoch=-1 )-> Any:
    """Linear warmup, then polynomial decay from the initial lr down to ``lr_end``."""
    lr_init = optimizer.defaults['''lr''']
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer , lr_lambda , last_epoch )
# Maps each SchedulerType member to its factory. Name restored from the lookup
# in ``get_scheduler`` below (the obfuscated binding left it unresolved).
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCAmelCase ( name , optimizer , step_rules = None , num_warmup_steps = None , num_training_steps = None , num_cycles = 1 , power = 1.0 , last_epoch = -1 , )-> Optional[int]:
    """Unified factory: build any supported scheduler by name.

    Parameter names restored from the body's own references (the original
    signature repeated one obfuscated name, a SyntaxError).

    Args:
        name: a SchedulerType member or its string value.
        optimizer: the optimizer to schedule.
        step_rules: only used by PIECEWISE_CONSTANT.
        num_warmup_steps / num_training_steps: required by warmup/decaying schedules.
        num_cycles / power: cosine-restart and polynomial knobs.
        last_epoch: index of the last epoch when resuming.

    Raises:
        ValueError: when a required argument for the chosen schedule is missing.
    """
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> public names it provides.
# Name restored from the ``_LazyModule(..., _import_structure, ...)`` call at
# the bottom of this file; the obfuscated version also dropped the conditional
# ``_import_structure[...] = [...]`` updates.
_import_structure = {
    """configuration_lxmert""": ["""LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LxmertConfig"""],
    """tokenization_lxmert""": ["""LxmertTokenizer"""],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fast tokenizer backend missing: silently skip the fast tokenizer.
    pass
else:
    _import_structure["""tokenization_lxmert_fast"""] = ["""LxmertTokenizerFast"""]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_lxmert"""] = [
        """LxmertEncoder""",
        """LxmertForPreTraining""",
        """LxmertForQuestionAnswering""",
        """LxmertModel""",
        """LxmertPreTrainedModel""",
        """LxmertVisualFeatureEncoder""",
        """LxmertXLayer""",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_tf_lxmert"""] = [
        """TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFLxmertForPreTraining""",
        """TFLxmertMainLayer""",
        """TFLxmertModel""",
        """TFLxmertPreTrainedModel""",
        """TFLxmertVisualFeatureEncoder""",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
    from .tokenization_lxmert import LxmertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_lxmert_fast import LxmertTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lxmert import (
            LxmertEncoder,
            LxmertForPreTraining,
            LxmertForQuestionAnswering,
            LxmertModel,
            LxmertPreTrainedModel,
            LxmertVisualFeatureEncoder,
            LxmertXLayer,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_lxmert import (
            TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLxmertForPreTraining,
            TFLxmertMainLayer,
            TFLxmertModel,
            TFLxmertPreTrainedModel,
            TFLxmertVisualFeatureEncoder,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from __future__ import annotations
def lowerCAmelCase ( electron_conc , hole_conc , intrinsic_conc , )-> tuple:
    """Solve the mass-action law n_i² = n · p for whichever quantity is 0.

    Exactly one of the three concentrations must be supplied as 0; it is
    computed from the other two. Parameter names restored from the body (the
    obfuscated signature repeated one name, a SyntaxError).

    Returns:
        ("<quantity name>", value) for the solved quantity, or (-1, -1) in the
        unreachable fallthrough branch.

    Raises:
        ValueError: if zero counts differ from one or any value is negative.
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
        raise ValueError('''You cannot supply more or less than 2 values''' )
    elif electron_conc < 0:
        raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
    elif hole_conc < 0:
        raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
    elif intrinsic_conc < 0:
        raise ValueError(
            '''Intrinsic concentration cannot be negative in a semiconductor''' )
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        # Unreachable: the count check above guarantees exactly one zero.
        return (-1, -1)
if __name__ == "__main__":
    # Run the module's doctests when this file is executed as a script.
    import doctest
    doctest.testmod()
from typing import Dict, Optional
import numpy as np
import datasets
_UpperCAmelCase : Dict ="""
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
_UpperCAmelCase : List[str] ="""
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
_UpperCAmelCase : List[str] ="""\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union ( pred_label , label , num_labels , ignore_index , label_map = None , reduce_labels = False , )-> Any:
    """Per-image intersection/union/prediction/label areas, one bin per class.

    Name restored from the call site in ``total_intersect_and_union``; the
    obfuscated locals never bound ``mask``/``intersect``/the area arrays.

    Args:
        pred_label: predicted segmentation map (array-like, height x width).
        label: ground-truth segmentation map of the same shape.
        num_labels: number of classes (histogram bins).
        ignore_index: label value excluded from all statistics.
        label_map: optional {old_id: new_id} remapping applied to ``label``.
        reduce_labels: shift labels down by 1, mapping background 0 -> 255.

    Returns:
        (area_intersect, area_union, area_pred_label, area_label), each an
        int-count array of shape (num_labels,).
    """
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index,
    label_map=None,
    reduce_labels=False,
):
    """Accumulate intersection/union statistics over a batch of maps.

    Args:
        results: iterable of predicted segmentation maps.
        gt_seg_maps: iterable of ground-truth segmentation maps.
        num_labels (int): number of classes.
        ignore_index (int): ground-truth value excluded from all statistics.
        label_map (dict, optional): forwarded to `intersect_and_union`.
        reduce_labels (bool): forwarded to `intersect_and_union`.

    Returns:
        Tuple of 1-D float64 arrays of length ``num_labels``:
        ``(total_area_intersect, total_area_union, total_area_pred_label,
        total_area_label)``.
    """
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index,
    nan_to_num=None,
    label_map=None,
    reduce_labels=False,
):
    """Compute mean IoU and accuracy metrics over a batch of segmentation maps.

    Args:
        results: iterable of predicted segmentation maps.
        gt_seg_maps: iterable of ground-truth segmentation maps.
        num_labels (int): number of classes.
        ignore_index (int): ground-truth value excluded from all statistics.
        nan_to_num (float, optional): if given, NaNs in every metric are
            replaced with this value.
        label_map (dict, optional): forwarded to the area accumulation.
        reduce_labels (bool): forwarded to the area accumulation.

    Returns:
        dict with keys ``mean_iou``, ``mean_accuracy``, ``overall_accuracy``,
        ``per_category_iou`` and ``per_category_accuracy`` (keys match the
        doctest example in this module's description).
    """
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class snake_case__(datasets.Metric):
    """`datasets.Metric` wrapper exposing the mean-IoU segmentation metric.

    NOTE(review): both methods below share the name ``lowercase_`` so only the
    second definition is live on the class — upstream these were presumably
    ``_info`` and ``_compute``; confirm before renaming.
    """

    def lowercase_(self):
        # Metric metadata: inputs are 2-D (height x width) uint16 label maps.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def lowercase_(
        self,
        predictions,
        references,
        num_labels,
        ignore_index,
        nan_to_num=None,
        label_map=None,
        reduce_labels=False,
    ):
        """Compute the mean-IoU metrics and return the resulting dict."""
        # Fix: the result must be bound to `iou_result`, the name returned below.
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
import inspect
import re

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes that are deliberately allowed to have no checkpoint in their docstring.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}
def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name mentioned in `config_class`'s source
    whose markdown link points at ``https://huggingface.co/<checkpoint>``,
    or ``None`` when no self-consistent link exists."""
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints():
    """Raise ``ValueError`` listing every (non-deprecated, non-ignored) config
    class whose docstring contains no valid checkpoint link."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints() | 619 | 1 |
def solution(power: int = 1_000) -> int:
    """Project Euler 16: return the sum of the decimal digits of ``2**power``.

    >>> solution(15)
    26
    """
    n = 2**power
    r = 0
    while n:
        # Peel off the last decimal digit and add it to the running sum.
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 619 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class snake_case__:
    """Wraps a learning-rate scheduler so that it only advances when the
    wrapped optimizer(s) actually performed a step (e.g. not skipped under
    mixed precision, and gradients are being synchronized).

    Args:
        scheduler: the wrapped ``torch.optim.lr_scheduler`` instance.
        optimizers: one optimizer, or a list/tuple of optimizers tied to it.
        step_with_optimizer (bool): if False, ``step`` ignores optimizer state
            and always steps the scheduler.
        split_batches (bool): if True, one scheduler step per training step;
            otherwise ``num_processes`` steps per training step.

    NOTE(review): every method below is named ``lowercase_``, so only the last
    definition is live on the class — upstream these were presumably distinct
    methods (step, get_last_lr, state_dict, ...); confirm before use.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer=True, split_batches=False):
        self.scheduler = scheduler
        # Normalize to a list so the stepping logic can iterate uniformly.
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def lowercase_(self, *args, **kwargs):
        """Step the wrapped scheduler, respecting optimizer/gradient state."""
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def lowercase_(self):
        return self.scheduler.get_last_lr()

    def lowercase_(self):
        return self.scheduler.state_dict()

    def lowercase_(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def lowercase_(self):
        return self.scheduler.get_lr()

    def lowercase_(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
import math
def is_prime(number: int) -> bool:
    """Trial-division primality test using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def lowerCAmelCase(ratio: float = 0.1) -> int:
    """Project Euler 58: return the side length of the square spiral at which
    the fraction of primes along both diagonals first drops below ``ratio``.
    """
    j = 3  # current (odd) side length of the spiral
    primes = 3  # primes found on the diagonals so far (3, 5, 7 for side 3)
    while primes / (2 * j - 1) >= ratio:
        # The first three corners of the next layer are j*j + k*(j+1) for
        # k = 1..3; the fourth corner (j+2)**2 is a perfect square, never prime.
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 |
from manim import *
class snake_case__( UpperCAmelCase__ ):
    """Manim scene illustrating how sharded checkpoint weights are placed on
    CPU, GPU and disk during big-model inference.

    NOTE(review): many statements below assign to the placeholder name
    ``lowerCAmelCase_`` while later lines read names that are never bound here
    (``cpu_left_col_base``, ``model_cpu_arr``, ``fill``, ``__lowercase`` ...).
    This looks like variable names lost to an automated rewrite — confirm
    against the upstream animation script before running.
    """

    def lowercase_ ( self ) -> Tuple:
        """Build the scene (CPU/GPU/model/checkpoint groups), then animate the
        checkpoint shards moving to disk and being garbage-collected."""
        # Base rectangles used as "memory cell" glyphs.
        lowerCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.25 , width=0.25 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU block: two columns of six cells plus a label.
        lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Tuple = Text('''CPU''' , font_size=2_4 )
        lowerCAmelCase_ : Union[str, Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__lowercase )
        # GPU block: four cells plus a label.
        lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[Any] = Text('''GPU''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        gpu.move_to([-1, -1, 0] )
        self.add(__lowercase )
        # Model block: six cells plus a label.
        lowerCAmelCase_ : str = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Dict = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Dict = Text('''Model''' , font_size=2_4 )
        lowerCAmelCase_ : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        model.move_to([3, -1.0, 0] )
        self.add(__lowercase )
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : Dict = []
        # Overlay translucent "weight" cells onto the model rectangles.
        for i, rect in enumerate(__lowercase ):
            rect.set_stroke(__lowercase )
            lowerCAmelCase_ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=__lowercase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowercase , buff=0.0 )
            self.add(__lowercase )
            model_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase , *__lowercase )
        # Loaded-checkpoint block.
        lowerCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = Text('''Loaded Checkpoint''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(__lowercase )
        lowerCAmelCase_ : Optional[Any] = []
        lowerCAmelCase_ : Dict = []
        # Mirror each checkpoint cell onto a CPU column slot.
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : str = fill.copy().set_fill(__lowercase , opacity=0.7 )
            target.move_to(__lowercase )
            ckpt_arr.append(__lowercase )
            lowerCAmelCase_ : Union[str, Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase )
        # Legend square with the "Key" labels.
        lowerCAmelCase_ : Union[str, Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCAmelCase_ : str = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__lowercase , __lowercase )
        lowerCAmelCase_ : str = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
        blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__lowercase )
        # Step-1 caption.
        lowerCAmelCase_ : str = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        # Disk block built from the smaller "meta" cells.
        lowerCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = Text('''Disk''' , font_size=2_4 )
        lowerCAmelCase_ : Optional[int] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(__lowercase , run_time=3 ) , Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) )
        # Animate checkpoint cells shrinking onto the disk slots.
        lowerCAmelCase_ : int = []
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : int = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
        self.play(*__lowercase )
        self.play(FadeOut(__lowercase ) )
        # Step-2 caption, then fade everything out.
        lowerCAmelCase_ : Union[str, Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__lowercase , run_time=3 ) )
        self.play(
            FadeOut(__lowercase , __lowercase , *__lowercase , *__lowercase ) , )
        self.wait()
# Package init for the Stable Diffusion ControlNet pipelines.
# The PyTorch pipelines require both `transformers` and `torch`; when either
# is missing, dummy placeholder objects are exported instead so that plain
# `import` statements keep working.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Missing optional deps: export dummy objects that raise on use.
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


# The Flax pipeline additionally requires JAX/Flax to be installed.
if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
# Value/symbol pairs in descending order, including the subtractive forms
# (CM, CD, XC, ...). Read as `ROMAN` by the greedy int -> Roman conversion.
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]
def lowerCAmelCase(roman: str) -> int:
    """Convert a Roman numeral string to an integer.

    A smaller value placed before a larger one (e.g. "IV", "XC") is
    subtractive and consumes both symbols at once.
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            # Subtractive pair such as IV (4) or XC (90).
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def lowerCAmelCase(number: int) -> str:
    """Convert a positive integer to its Roman numeral representation using
    the module-level `ROMAN` value/symbol table."""
    result = []
    for arabic, roman in ROMAN:
        # Greedily take as many copies of the current symbol as fit.
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 | 1 |
# Value/symbol pairs in descending order, including the subtractive forms
# (CM, CD, XC, ...). Read as `ROMAN` by the greedy int -> Roman conversion.
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]
def lowerCAmelCase(roman: str) -> int:
    """Convert a Roman numeral string to an integer.

    A smaller value placed before a larger one (e.g. "IV", "XC") is
    subtractive and consumes both symbols at once.
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            # Subtractive pair such as IV (4) or XC (90).
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def lowerCAmelCase(number: int) -> str:
    """Convert a positive integer to its Roman numeral representation using
    the module-level `ROMAN` value/symbol table."""
    result = []
    for arabic, roman in ROMAN:
        # Greedily take as many copies of the current symbol as fit.
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 |
import csv
import tweepy
# Twitter API credentials
# Twitter API credentials — fill these in before running.
# NOTE(review): the original variable names were lost in this file; these
# follow the conventional tweepy naming used by the download routine below —
# confirm against the upstream script.
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
def get_all_tweets(screen_name: str) -> None:
    """Download a user's recent tweets (paging backwards with ``max_id``) and
    write them to ``new_<screen_name>_tweets.csv`` as (id, created_at, text).

    NOTE(review): reads the module-level Twitter credentials; their original
    names were lost in this file — confirm they match the constants above.
    """
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""") | 619 | 1 |
# Micro-benchmark: run a BERT-style ONNX model 2000 times with onnxruntime
# (graph optimizations disabled) and report the mean latency in milliseconds.
import os
import time

import numpy as np
import onnxruntime as ort

# NOTE(review): the three bare string assignments below look like obfuscation
# residue — upstream they were presumably `os.environ[...]`-style provider
# settings; as written they only rebind one module variable. Likewise, names
# read later (`sess_opt`, `execution_provider`, `batch`, `sequence`,
# `input_ids`, ...) are never bound here, and `np.intaa` is not a real dtype
# (likely np.int64). Confirm against the original script before running.
_UpperCAmelCase : Union[str, Any] ="""1"""
_UpperCAmelCase : Any ="""0"""
_UpperCAmelCase : List[str] ="""1"""
# Disable every graph optimization so the benchmark measures the raw graph.
_UpperCAmelCase : Optional[Any] =ort.SessionOptions()
_UpperCAmelCase : str =ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("""Create inference session...""")
# Prefer TensorRT, fall back to CUDA.
_UpperCAmelCase : int =["""TensorrtExecutionProvider""", """CUDAExecutionProvider"""]
_UpperCAmelCase : Union[str, Any] =ort.InferenceSession("""model.onnx""", sess_options=sess_opt, providers=execution_provider)
_UpperCAmelCase : Tuple =ort.RunOptions()
# Dummy inputs: batch of 1, sequence length 128, all ones.
_UpperCAmelCase : Union[str, Any] =128
_UpperCAmelCase : Tuple =1
_UpperCAmelCase : List[Any] =np.ones((batch, sequence), dtype=np.intaa)
_UpperCAmelCase : str =np.ones((batch, sequence), dtype=np.intaa)
_UpperCAmelCase : int =np.ones((batch, sequence), dtype=np.intaa)
# One warm-up run so session initialization is excluded from the timing.
print("""Warm up phase...""")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)
# Timed loop: 2000 inferences, then report the mean latency.
print("""Start inference...""")
_UpperCAmelCase : List[str] =time.time()
_UpperCAmelCase : Union[str, Any] =2000
_UpperCAmelCase : str ={}
for iter in range(max_iters):
    _UpperCAmelCase : Union[str, Any] =sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("""Average Inference Time = {:.3f} ms""".format((time.time() - start_time) * 1000 / max_iters))
from math import sqrt
def is_prime(number):
    """Return True iff ``number`` is prime (trial division up to sqrt)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status
def lowerCAmelCase(n):
    """Sieve of Eratosthenes: return all primes from 2 up to and including n."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def get_prime_numbers(n):
    """Return all primes between 2 and n (inclusive), using `is_prime`."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def prime_factorization(number):
    """Return the prime factorization of ``number`` as a list of factors
    (for 0, 1 or a prime, the list contains just the number itself)."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def lowerCAmelCase(number):
    """Return the greatest prime factor of ``number`` (>= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def lowerCAmelCase(number):
    """Return the smallest prime factor of ``number`` (>= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def is_even(number):
    """Return True iff ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0
def lowerCAmelCase(number):
    """Return True iff ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0
def lowerCAmelCase(number):
    """Goldbach: return a pair of primes whose sum is the even ``number`` > 2."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans
def gcd(number1, number2):
    """Euclidean algorithm: greatest common divisor of two non-negative ints."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1
def lowerCAmelCase(number1, number2):
    """Return the least common multiple (kgV) of two positive integers, built
    from the union of their prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans
def lowerCAmelCase(n):
    """Return the n-th prime number (0-based: n=0 -> 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans
def lowerCAmelCase(p_number_1, p_number_2):
    """Return all primes strictly between the primes p_number_1 and p_number_2."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n):
    """Return all positive divisors of n, including 1 and n itself."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans
def lowerCAmelCase(number):
    """Return True iff ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def lowerCAmelCase(numerator, denominator):
    """Reduce numerator/denominator by their greatest common divisor and
    return the simplified fraction as a (numerator, denominator) tuple."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCAmelCase(n):
    """Iterative factorial: return n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans
def lowerCAmelCase(n):
    """Return the n-th value of the Fibonacci-style sequence with
    F(0) = F(1) = 1 (so n=2 -> 2, n=3 -> 3, n=4 -> 5, ...)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
from itertools import product
def lowerCAmelCase ( sides_number , dice_number )-> list[int]:
    """Histogram of totals when throwing ``dice_number`` dice with faces
    1..``sides_number``.

    Index ``i`` of the returned list holds the number of ordered
    outcomes whose faces sum to ``i`` (indices below ``dice_number``
    stay 0).  The original definition duplicated its parameter name and
    referenced unbound locals; names restored.
    """
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    # enumerate every ordered outcome of the throw
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
def lowerCAmelCase ( )-> float:
    """Project Euler 205: probability that Peter (nine 4-sided dice)
    rolls a strictly higher total than Colin (six 6-sided dice),
    rounded to 7 decimal places.

    A local copy of the dice-total histogram helper is used so this
    function does not depend on module-level names (the original called
    the undefined ``total_frequency_distribution``).
    """

    def _total_frequency_distribution(sides_number, dice_number):
        # number of ordered outcomes per total, index == total
        max_total = sides_number * dice_number
        totals_frequencies = [0] * (max_total + 1)
        for dice_numbers in product(range(1, sides_number + 1), repeat=dice_number):
            totals_frequencies[sum(dice_numbers)] += 1
        return totals_frequencies

    peter_totals_frequencies = _total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = _total_frequency_distribution(sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    # Peter wins whenever his total strictly exceeds Colin's; the slice
    # [min_colin_total:peter_total] covers Colin totals < peter_total.
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"""{lowerCAmelCase() = }""")
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
_UpperCAmelCase : Tuple =10
def lowerCAmelCase ( left , right , array , target )-> int:
    """Linear scan of ``array[left:right]`` (right exclusive) for ``target``.

    Returns the index of the first match, or -1 if absent.  The original
    definition used one mangled name for all four parameters (a
    SyntaxError); names restored from the body's usage.
    """
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def lowerCAmelCase ( array , target , precision=10 )-> int:
    """Iterative ternary search for ``target`` in the sorted ``array``.

    The search interval is split into thirds until it is shorter than
    ``precision`` elements, then finished with a linear scan.  Returns
    the index of ``target`` or -1 if absent.

    ``precision`` (previously a hard-coded module global) is now a
    keyword parameter defaulting to the module's value of 10 —
    backward-compatible for all existing positional callers.
    """

    def _lin_search(left, right):
        # linear fallback over array[left:right] (right exclusive)
        for i in range(left, right):
            if array[i] == target:
                return i
        return -1

    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return _lin_search(left, right)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            # target lies in the left third
            right = one_third - 1
        elif array[two_third] < target:
            # target lies in the right third
            left = two_third + 1
        else:
            # target lies in the middle third
            left = one_third + 1
            right = two_third - 1
    return -1
def lowerCAmelCase ( left , right , array , target , precision=10 )-> int:
    """Recursive ternary search for ``target`` in the sorted
    ``array[left:right + 1]`` (both bounds inclusive).

    Returns the index of ``target`` or -1 if absent.

    Fixes relative to the original (beyond restoring the mangled
    names): the linear fallback now scans up to and including
    ``right``, and a one-element interval (``left == right``) is
    checked instead of returning -1 — previously a target sitting at
    the final index of the interval could be missed.  ``precision`` is
    a keyword parameter defaulting to the module's value of 10.
    """
    if left <= right:
        if right - left < precision:
            # interval small enough: finish with an inclusive linear scan
            for i in range(left, right + 1):
                if array[i] == target:
                    return i
            return -1
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return lowerCAmelCase(left, one_third - 1, array, target, precision)
        elif array[two_third] < target:
            return lowerCAmelCase(two_third + 1, right, array, target, precision)
        else:
            return lowerCAmelCase(one_third + 1, two_third - 1, array, target, precision)
    else:
        return -1
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): none of the names read below (user_input, collection,
    # target, resulta) nor the functions called (ite_ternary_search,
    # rec_ternary_search) are defined in this module — the mangling
    # replaced them with _UpperCAmelCase / lowerCAmelCase — so this demo
    # driver raises NameError as written.  Restore the upstream names
    # before running interactively.
    _UpperCAmelCase : Tuple =input("""Enter numbers separated by comma:\n""").strip()
    _UpperCAmelCase : Union[str, Any] =[int(item.strip()) for item in user_input.split(""",""")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    _UpperCAmelCase : int =int(input("""Enter the number to be found in the list:\n""").strip())
    _UpperCAmelCase : Optional[Any] =ite_ternary_search(collection, target)
    _UpperCAmelCase : List[str] =rec_ternary_search(0, len(collection) - 1, collection, target)
    if resulta != -1:
        print(f"""Iterative search: {target} found at positions: {resulta}""")
        print(f"""Recursive search: {target} found at positions: {resulta}""")
    else:
        print("""Not found""")
import re
import string
import numpy as np
import datasets
_UpperCAmelCase : Union[str, Any] ="""
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_UpperCAmelCase : int ="""
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
_UpperCAmelCase : Optional[Any] ="""
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class snake_case__( datasets.Metric ):
    """Exact-match metric: percentage of predictions equal to their
    reference string after the optional normalisations below.

    NOTE(review): both methods share the name ``lowercase_`` (upstream
    they are ``_info`` and ``_compute``), so the second definition
    shadows the first at class-creation time — confirm the intended
    names before relying on this class.
    """

    def lowercase_ ( self ) -> "datasets.MetricInfo":
        # Metric metadata: both inputs are plain string sequences.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , reference_urls=[] , )

    def lowercase_ ( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ) -> dict:
        """Compute the exact-match rate (0.0 - 100.0).

        The mangled original duplicated every parameter name
        (``__lowercase``) and passed the wrong arguments to ``re.sub``;
        names and call sites restored from the body's evident intent.
        """
        # strip user-supplied regex patterns before any other normalisation
        if regexes_to_ignore is not None:
            for pattern in regexes_to_ignore:
                predictions = np.array([re.sub(pattern , '''''' , x ) for x in predictions] )
                references = np.array([re.sub(pattern , '''''' , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            table = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
            predictions = np.char.translate(predictions , table=table )
            references = np.char.translate(references , table=table )
        if ignore_numbers:
            table = string.digits.maketrans('''''' , '''''' , string.digits )
            predictions = np.char.translate(predictions , table=table )
            references = np.char.translate(references , table=table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 1_0_0}
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import machinery for the LLaMA subpackage: submodules listed in
# `_import_structure` are imported only on first attribute access via
# `_LazyModule`.  The mangled original assigned every list to
# `_UpperCAmelCase`, so `_import_structure` (used at the bottom) was never
# defined — restored here.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # slow (sentencepiece-backed) tokenizer
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # fast (Rust-backed) tokenizer
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports; at runtime the lazy
    # module below serves them on demand.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Replace this module object with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case__:
    """Builds tiny BioGpt configurations and dummy inputs for the tests below.

    NOTE(review): this chunk was mechanically name-mangled — every
    parameter is ``__lowercase`` (duplicate parameter names are a
    SyntaxError) and every local is ``lowerCAmelCase_``, so ``__init__``
    never stores anything on ``self`` and the helper bodies reference
    names that are never bound.  Restore the upstream
    ``BioGptModelTester`` names before executing; the comments below
    describe only the evident intent.
    """
    # Intended to cache every hyper-parameter on `self` for the helpers below.
    def __init__( self , __lowercase , __lowercase=1_3 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=False , __lowercase=True , __lowercase=9_9 , __lowercase=3_2 , __lowercase=5 , __lowercase=4 , __lowercase=3_7 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_1_2 , __lowercase=1_6 , __lowercase=2 , __lowercase=0.02 , __lowercase=3 , __lowercase=4 , __lowercase=None , ) -> Tuple:
        lowerCAmelCase_ : Optional[int] = parent
        lowerCAmelCase_ : Dict = batch_size
        lowerCAmelCase_ : str = seq_length
        lowerCAmelCase_ : Any = is_training
        lowerCAmelCase_ : Union[str, Any] = use_input_mask
        lowerCAmelCase_ : List[Any] = use_token_type_ids
        lowerCAmelCase_ : Optional[int] = use_labels
        lowerCAmelCase_ : Union[str, Any] = vocab_size
        lowerCAmelCase_ : List[Any] = hidden_size
        lowerCAmelCase_ : List[str] = num_hidden_layers
        lowerCAmelCase_ : Any = num_attention_heads
        lowerCAmelCase_ : Optional[int] = intermediate_size
        lowerCAmelCase_ : str = hidden_act
        lowerCAmelCase_ : List[str] = hidden_dropout_prob
        lowerCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
        lowerCAmelCase_ : Optional[Any] = max_position_embeddings
        lowerCAmelCase_ : List[Any] = type_vocab_size
        lowerCAmelCase_ : str = type_sequence_label_size
        lowerCAmelCase_ : str = initializer_range
        lowerCAmelCase_ : Any = num_labels
        lowerCAmelCase_ : Tuple = num_choices
        lowerCAmelCase_ : List[str] = scope
    # Builds random input ids / masks / labels plus a config.
    def lowercase_ ( self ) -> Union[str, Any]:
        lowerCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCAmelCase_ : Optional[Any] = None
        if self.use_input_mask:
            lowerCAmelCase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCAmelCase_ : Tuple = None
        if self.use_token_type_ids:
            lowerCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCAmelCase_ : List[Any] = None
        lowerCAmelCase_ : Dict = None
        lowerCAmelCase_ : Dict = None
        if self.use_labels:
            lowerCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
        lowerCAmelCase_ : Dict = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    # Tiny BioGptConfig mirroring the tester's hyper-parameters.
    def lowercase_ ( self ) -> Optional[Any]:
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
    # Forward pass through the bare model; checks output shape only.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> int:
        lowerCAmelCase_ : int = BioGptModel(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        lowerCAmelCase_ : Dict = model(__lowercase , attention_mask=__lowercase )
        lowerCAmelCase_ : int = model(__lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    # Forward pass through the causal-LM head; checks logits shape.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Union[str, Any]:
        lowerCAmelCase_ : int = BioGptForCausalLM(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        lowerCAmelCase_ : Dict = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # Checks that cached past_key_values reproduce the no-cache outputs
    # for a single appended token.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , *__lowercase ) -> List[str]:
        lowerCAmelCase_ : Any = BioGptModel(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        # create attention mask
        lowerCAmelCase_ : Dict = torch.ones(input_ids.shape , dtype=torch.long , device=__lowercase )
        lowerCAmelCase_ : str = self.seq_length // 2
        lowerCAmelCase_ : Tuple = 0
        # first forward pass
        lowerCAmelCase_ , lowerCAmelCase_ : Any = model(__lowercase , attention_mask=__lowercase ).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        lowerCAmelCase_ : Dict = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # change a random masked slice from input_ids
        lowerCAmelCase_ : Any = ids_tensor((1,) , __lowercase ).item() + 1
        lowerCAmelCase_ : List[str] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
        lowerCAmelCase_ : Tuple = random_other_next_tokens
        # append to next input_ids and attn_mask
        lowerCAmelCase_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
        lowerCAmelCase_ : Dict = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__lowercase )] , dim=1 , )
        # get two different outputs
        lowerCAmelCase_ : Optional[Any] = model(__lowercase , attention_mask=__lowercase )['''last_hidden_state''']
        lowerCAmelCase_ : Union[str, Any] = model(__lowercase , past_key_values=__lowercase , attention_mask=__lowercase )['''last_hidden_state''']
        # select random slice
        lowerCAmelCase_ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowerCAmelCase_ : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
        lowerCAmelCase_ : List[str] = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1e-3 ) )
    # Same cache-equivalence check but appending three tokens at once.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , *__lowercase ) -> List[Any]:
        lowerCAmelCase_ : Union[str, Any] = BioGptModel(config=__lowercase ).to(__lowercase ).eval()
        lowerCAmelCase_ : Optional[int] = torch.ones(input_ids.shape , dtype=torch.long , device=__lowercase )
        # first forward pass
        lowerCAmelCase_ : List[Any] = model(__lowercase , attention_mask=__lowercase , use_cache=__lowercase )
        lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        lowerCAmelCase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowerCAmelCase_ : int = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        lowerCAmelCase_ : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
        lowerCAmelCase_ : int = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        lowerCAmelCase_ : Optional[Any] = model(__lowercase , attention_mask=__lowercase )['''last_hidden_state''']
        lowerCAmelCase_ : Optional[Any] = model(__lowercase , attention_mask=__lowercase , past_key_values=__lowercase )[
            '''last_hidden_state'''
        ]
        # select random slice
        lowerCAmelCase_ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowerCAmelCase_ : int = output_from_no_past[:, -3:, random_slice_idx].detach()
        lowerCAmelCase_ : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1e-3 ) )
    # Runs a loss backward pass, optionally with gradient checkpointing.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , *__lowercase , __lowercase=False ) -> List[Any]:
        lowerCAmelCase_ : List[Any] = BioGptForCausalLM(__lowercase )
        model.to(__lowercase )
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        lowerCAmelCase_ : Optional[int] = model(__lowercase , labels=__lowercase )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        result.loss.backward()
    # Verifies the scaled init std/mean of the c_proj weights.
    def lowercase_ ( self , __lowercase , *__lowercase ) -> Any:
        lowerCAmelCase_ : List[str] = BioGptModel(__lowercase )
        lowerCAmelCase_ : List[str] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
    # Token-classification head: checks per-token logits shape.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , *__lowercase ) -> List[Any]:
        lowerCAmelCase_ : int = self.num_labels
        lowerCAmelCase_ : str = BioGptForTokenClassification(__lowercase )
        model.to(__lowercase )
        model.eval()
        lowerCAmelCase_ : List[Any] = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    # Repackages prepare_config_and_inputs() into the common-test dict form.
    def lowercase_ ( self ) -> Dict:
        lowerCAmelCase_ : List[str] = self.prepare_config_and_inputs()
        (
            (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) ,
        ) : int = config_and_inputs
        lowerCAmelCase_ : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    """Common model / generation / pipeline test suite wired up for BioGpt.

    NOTE(review): as in the tester above, parameters are mangled to
    ``__lowercase`` (duplicate names) and locals to ``lowerCAmelCase_``;
    the mixin base names are also mangled (``UpperCAmelCase__``).
    Restore the upstream names before executing.
    """
    # all_model_classes / all_generative_model_classes / pipeline mapping
    # (attribute names mangled to SCREAMING_SNAKE_CASE__).
    SCREAMING_SNAKE_CASE__ : Optional[Any] = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    SCREAMING_SNAKE_CASE__ : Any = (BioGptForCausalLM,) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE__ : List[Any] = (
        {
            """feature-extraction""": BioGptModel,
            """text-classification""": BioGptForSequenceClassification,
            """text-generation""": BioGptForCausalLM,
            """token-classification""": BioGptForTokenClassification,
            """zero-shot""": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ : Any = False
    # setUp: builds the model tester and a config tester.
    def lowercase_ ( self ) -> Dict:
        lowerCAmelCase_ : str = BioGptModelTester(self )
        lowerCAmelCase_ : Any = ConfigTester(self , config_class=__lowercase , hidden_size=3_7 )
    def lowercase_ ( self ) -> Dict:
        self.config_tester.run_common_tests()
    def lowercase_ ( self ) -> Optional[Any]:
        lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowercase )
    # Exercises the three position-embedding variants.
    def lowercase_ ( self ) -> Dict:
        lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowerCAmelCase_ : List[Any] = type
            self.model_tester.create_and_check_model(*__lowercase )
    def lowercase_ ( self ) -> List[Any]:
        lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__lowercase )
    def lowercase_ ( self ) -> str:
        lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*__lowercase , gradient_checkpointing=__lowercase )
    def lowercase_ ( self ) -> List[Any]:
        lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__lowercase )
    def lowercase_ ( self ) -> Dict:
        lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*__lowercase )
    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*__lowercase )
    # Batched generation with left padding must match unpadded generation.
    @slow
    def lowercase_ ( self ) -> Optional[Any]:
        lowerCAmelCase_ : Optional[Any] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        model.to(__lowercase )
        lowerCAmelCase_ : Optional[Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
        lowerCAmelCase_ : int = '''left'''
        # Define PAD Token = EOS Token = 50256
        lowerCAmelCase_ : int = tokenizer.eos_token
        lowerCAmelCase_ : List[str] = model.config.eos_token_id
        # use different length sentences to test batching
        lowerCAmelCase_ : Optional[Any] = [
            '''Hello, my dog is a little''',
            '''Today, I''',
        ]
        lowerCAmelCase_ : int = tokenizer(__lowercase , return_tensors='''pt''' , padding=__lowercase )
        lowerCAmelCase_ : Tuple = inputs['''input_ids'''].to(__lowercase )
        lowerCAmelCase_ : Union[str, Any] = model.generate(
            input_ids=__lowercase , attention_mask=inputs['''attention_mask'''].to(__lowercase ) , )
        lowerCAmelCase_ : Tuple = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(__lowercase )
        lowerCAmelCase_ : Optional[Any] = model.generate(input_ids=__lowercase )
        lowerCAmelCase_ : Any = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
        lowerCAmelCase_ : int = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(__lowercase )
        lowerCAmelCase_ : str = model.generate(input_ids=__lowercase , max_length=model.config.max_length - num_paddings )
        lowerCAmelCase_ : Tuple = tokenizer.batch_decode(__lowercase , skip_special_tokens=__lowercase )
        lowerCAmelCase_ : Optional[int] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__lowercase )
        lowerCAmelCase_ : List[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__lowercase )
        lowerCAmelCase_ : List[str] = [
            '''Hello, my dog is a little bit bigger than a little bit.''',
            '''Today, I have a good idea of how to use the information''',
        ]
        self.assertListEqual(__lowercase , __lowercase )
        self.assertListEqual(__lowercase , [non_padded_sentence, padded_sentence] )
    @slow
    def lowercase_ ( self ) -> Optional[int]:
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase_ : str = BioGptModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )
    # Single-label sequence classification: checks logits shape.
    def lowercase_ ( self ) -> str:
        lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase_ : Tuple = 3
        lowerCAmelCase_ : Tuple = input_dict['''input_ids''']
        lowerCAmelCase_ : Optional[Any] = input_ids.ne(1 ).to(__lowercase )
        lowerCAmelCase_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowerCAmelCase_ : Optional[Any] = BioGptForSequenceClassification(__lowercase )
        model.to(__lowercase )
        model.eval()
        lowerCAmelCase_ : int = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    # Multi-label sequence classification variant of the same check.
    def lowercase_ ( self ) -> Tuple:
        lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase_ : List[Any] = 3
        lowerCAmelCase_ : List[str] = '''multi_label_classification'''
        lowerCAmelCase_ : Union[str, Any] = input_dict['''input_ids''']
        lowerCAmelCase_ : int = input_ids.ne(1 ).to(__lowercase )
        lowerCAmelCase_ : Dict = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        lowerCAmelCase_ : List[str] = BioGptForSequenceClassification(__lowercase )
        model.to(__lowercase )
        model.eval()
        lowerCAmelCase_ : Dict = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class snake_case__( unittest.TestCase ):
    """Slow integration tests against the public microsoft/biogpt checkpoint.

    NOTE(review): locals are mangled to ``lowerCAmelCase_`` here as well,
    so names like ``output`` / ``output_ids`` referenced below are never
    bound; restore the upstream names before executing.
    """
    # Pins the first logits of a short forward pass to known values.
    @slow
    def lowercase_ ( self ) -> Union[str, Any]:
        lowerCAmelCase_ : Optional[int] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        lowerCAmelCase_ : Dict = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
        lowerCAmelCase_ : str = model(__lowercase )[0]
        lowerCAmelCase_ : Tuple = 4_2_3_8_4
        lowerCAmelCase_ : str = torch.Size((1, 5, vocab_size) )
        self.assertEqual(output.shape , __lowercase )
        lowerCAmelCase_ : Tuple = torch.tensor(
            [[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , __lowercase , atol=1e-4 ) )
    # Seeded beam-search generation must reproduce a fixed sentence.
    @slow
    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ : List[str] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
        lowerCAmelCase_ : Optional[Any] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        model.to(__lowercase )
        torch.manual_seed(0 )
        lowerCAmelCase_ : Dict = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Optional[int] = model.generate(
            **__lowercase , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=__lowercase , )
        lowerCAmelCase_ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=__lowercase )
        lowerCAmelCase_ : List[Any] = (
            '''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
            ''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
            ''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
            ''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
            ''' more than 800,000 deaths.'''
        )
        self.assertEqual(__lowercase , __lowercase )
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_UpperCAmelCase : Any =False  # NOTE(review): not referenced anywhere in this chunk — TODO confirm intended name/use
class snake_case__( unittest.TestCase ):
    """Fast CPU tests for the VQ-Diffusion pipeline built from tiny dummy
    components.

    NOTE(review): locals here are mangled to ``lowerCAmelCase_`` and the
    first method's name is mangled too (its body calls
    ``super().tearDown()``, so it was presumably ``tearDown`` upstream —
    confirm).  Property bodies reference attributes (``self.num_embed``
    etc.) whose defining properties were also renamed; restore upstream
    names before executing.
    """
    def lowercase_ ( self ) -> Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    # num_embed (codebook size) — presumably; name mangled.
    @property
    def lowercase_ ( self ) -> Union[str, Any]:
        return 1_2
    @property
    def lowercase_ ( self ) -> Any:
        return 1_2
    @property
    def lowercase_ ( self ) -> Optional[Any]:
        return 3_2
    # Tiny VQModel with a fixed seed for determinism.
    @property
    def lowercase_ ( self ) -> int:
        torch.manual_seed(0 )
        lowerCAmelCase_ : Any = VQModel(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model
    @property
    def lowercase_ ( self ) -> Dict:
        lowerCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer
    # Tiny CLIP text encoder with a fixed seed.
    @property
    def lowercase_ ( self ) -> int:
        torch.manual_seed(0 )
        lowerCAmelCase_ : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        return CLIPTextModel(__lowercase )
    # Tiny Transformer2DModel used as the diffusion backbone.
    @property
    def lowercase_ ( self ) -> List[str]:
        torch.manual_seed(0 )
        lowerCAmelCase_ : Union[str, Any] = 1_2
        lowerCAmelCase_ : int = 1_2
        lowerCAmelCase_ : Union[str, Any] = {
            '''attention_bias''': True,
            '''cross_attention_dim''': 3_2,
            '''attention_head_dim''': height * width,
            '''num_attention_heads''': 1,
            '''num_vector_embeds''': self.num_embed,
            '''num_embeds_ada_norm''': self.num_embeds_ada_norm,
            '''norm_num_groups''': 3_2,
            '''sample_size''': width,
            '''activation_fn''': '''geglu-approximate''',
        }
        lowerCAmelCase_ : List[str] = TransformeraDModel(**__lowercase )
        return model
    # End-to-end pipeline run without learned classifier-free sampling
    # embeddings; pins a 3x3 output slice.
    def lowercase_ ( self ) -> str:
        lowerCAmelCase_ : List[Any] = '''cpu'''
        lowerCAmelCase_ : Any = self.dummy_vqvae
        lowerCAmelCase_ : str = self.dummy_text_encoder
        lowerCAmelCase_ : Union[str, Any] = self.dummy_tokenizer
        lowerCAmelCase_ : int = self.dummy_transformer
        lowerCAmelCase_ : List[str] = VQDiffusionScheduler(self.num_embed )
        lowerCAmelCase_ : Union[str, Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=__lowercase )
        lowerCAmelCase_ : Dict = VQDiffusionPipeline(
            vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
        lowerCAmelCase_ : int = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowerCAmelCase_ : Any = '''teddy bear playing in the pool'''
        lowerCAmelCase_ : int = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Tuple = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' )
        lowerCAmelCase_ : Union[str, Any] = output.images
        lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : List[Any] = pipe(
            [prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0]
        lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
        lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 2_4, 2_4, 3)
        lowerCAmelCase_ : Optional[int] = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    # Same run but with learnable classifier-free sampling embeddings.
    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ : Optional[Any] = '''cpu'''
        lowerCAmelCase_ : str = self.dummy_vqvae
        lowerCAmelCase_ : Dict = self.dummy_text_encoder
        lowerCAmelCase_ : List[Any] = self.dummy_tokenizer
        lowerCAmelCase_ : Union[str, Any] = self.dummy_transformer
        lowerCAmelCase_ : Tuple = VQDiffusionScheduler(self.num_embed )
        lowerCAmelCase_ : str = LearnedClassifierFreeSamplingEmbeddings(
            learnable=__lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
        lowerCAmelCase_ : List[str] = VQDiffusionPipeline(
            vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
        lowerCAmelCase_ : Union[str, Any] = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowerCAmelCase_ : Any = '''teddy bear playing in the pool'''
        lowerCAmelCase_ : List[str] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Dict = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' )
        lowerCAmelCase_ : str = output.images
        lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Union[str, Any] = pipe(
            [prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0]
        lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
        lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 2_4, 2_4, 3)
        lowerCAmelCase_ : Union[str, Any] = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class snake_case__( unittest.TestCase ):
    """Slow GPU integration test for the pretrained ``microsoft/vq-diffusion-ithq``
    pipeline against a stored reference image.

    NOTE(review): the methods are named ``lowercase_`` rather than
    ``tearDown``/``test_*``, and locals/arguments use
    ``lowerCAmelCase_``/``__lowercase`` placeholders — unittest will not discover
    or run these as written; confirm the intended names upstream.
    """
    def lowercase_ ( self ) -> Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def lowercase_ ( self ) -> int:
        # Reference output produced with classifier-free sampling.
        lowerCAmelCase_ : Tuple = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
        lowerCAmelCase_ : str = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
        lowerCAmelCase_ : List[Any] = pipeline.to(__lowercase )
        pipeline.set_progress_bar_config(disable=__lowercase )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Optional[int] = pipeline(
            '''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=__lowercase , output_type='''np''' , )
        lowerCAmelCase_ : Union[str, Any] = output.images[0]
        assert image.shape == (2_5_6, 2_5_6, 3)
        # Loose tolerance: gumbel-softmax sampling is not bit-deterministic on GPU.
        assert np.abs(expected_image - image ).max() < 2.0
# GPT-NeoX lazy-import bootstrap: declare which public names live in which
# submodule, then install a `_LazyModule` so heavy optional dependencies
# (torch, tokenizers) are only imported on first attribute access.
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable

# Map submodule name -> public symbols exported from it. The configuration has
# no optional dependencies, so it is always present.
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

# The fast tokenizer is only exported when the `tokenizers` package is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

# Modeling classes require torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )
else:
    import sys

    # Fix: the original referenced an undefined `_import_structure` (NameError on
    # import) after repeatedly rebinding the structure dict to a throwaway name,
    # and bound the lazy proxy to a variable instead of installing it. Replace
    # this module in sys.modules with the lazy proxy, as the pattern intends.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# The slow (SentencePiece) tokenizer is optional; fall back to None so the fast
# tokenizer can still declare it as its slow counterpart.
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    _UpperCAmelCase : Dict =None
# Module-level logger.
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
# Expected vocabulary/tokenizer file names.
_UpperCAmelCase : Any ={"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
# Download locations of those files per public checkpoint.
_UpperCAmelCase : Any ={
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    },
    """tokenizer_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
    },
}
# No fixed positional-embedding size for these checkpoints (relative attention).
_UpperCAmelCase : Dict ={
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}
# SentencePiece word-boundary marker.
_UpperCAmelCase : Tuple ="""▁"""
# Segments (not really needed)
_UpperCAmelCase : str =0
_UpperCAmelCase : List[str] =1
_UpperCAmelCase : int =2
_UpperCAmelCase : Any =3
_UpperCAmelCase : List[Any] =4
# NOTE(review): every assignment above rebinds the single name `_UpperCAmelCase`,
# so only the last value survives; the originals were presumably distinct
# constants (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, SPIECE_UNDERLINE,
# SEG_ID_* ...) referenced by the class below — confirm upstream.
class snake_case__( UpperCAmelCase__ ):
    """Fast (Rust-backed) XLNet tokenizer with left-side padding.

    NOTE(review): the class attributes reference module constants
    (``VOCAB_FILES_NAMES`` etc.) and the method bodies read names
    (``mask_token``, ``token_ids_a``, ``sep``, ``cls``, ``do_lower_case``,
    ``save_directory`` ...) that are not bound under those identifiers in this
    file as written; several ``def`` lines also repeat the parameter name
    ``__lowercase``. This looks like mechanical renaming damage — confirm
    against the upstream implementation before relying on runtime behavior.
    """
    SCREAMING_SNAKE_CASE__ : int = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # XLNet pads sequences on the left.
    SCREAMING_SNAKE_CASE__ : Any = """left"""
    SCREAMING_SNAKE_CASE__ : List[Any] = XLNetTokenizer
    def __init__( self , __lowercase=None , __lowercase=None , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<unk>" , __lowercase="<sep>" , __lowercase="<pad>" , __lowercase="<cls>" , __lowercase="<mask>" , __lowercase=["<eop>", "<eod>"] , **__lowercase , ) -> List[Any]:
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase_ : Any = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
        super().__init__(
            vocab_file=__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , **__lowercase , )
        # Number of special tokens added for a sequence pair (sep, sep, cls).
        lowerCAmelCase_ : List[Any] = 3
        lowerCAmelCase_ : Dict = do_lower_case
        lowerCAmelCase_ : Dict = remove_space
        lowerCAmelCase_ : List[str] = keep_accents
        lowerCAmelCase_ : List[str] = vocab_file
        # The slow vocabulary can only be re-saved when a vocab file was given.
        lowerCAmelCase_ : str = False if not self.vocab_file else True
    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        """Build model inputs with XLNet's trailing specials: ``X <sep> <cls>`` for a
        single sequence, ``A <sep> B <sep> <cls>`` for a pair.
        """
        lowerCAmelCase_ : Tuple = [self.sep_token_id]
        lowerCAmelCase_ : Any = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls
    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        """Create token-type ids: segment 0 for the first sequence, 1 for the second,
        and the special segment id 2 for the trailing ``<cls>`` token.
        """
        lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCAmelCase_ : List[Any] = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
    def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
        """Copy the SentencePiece model into the target directory and return its path.

        Raises:
            ValueError: when the fast tokenizer was loaded without a slow vocab file.
        """
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(__lowercase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase_ : str = os.path.join(
            __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        # Avoid copying a file onto itself.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
            copyfile(self.vocab_file , __lowercase )
        return (out_vocab_file,)
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
    """Return True iff the argument is a valid dotted-quad IPv4-style address.

    The string must split on '.' into exactly four all-digit fields, each in
    the range [0, 254].

    NOTE(review): the 254 upper bound follows the original algorithm, but
    standard IPv4 octets go up to 255 — confirm intent.
    """
    # Non-digit fields are dropped, so e.g. "1.2.3.x" yields fewer than four
    # octets and fails the length check. (The original comprehension read an
    # undefined name and int()-converted the whole address string, so it
    # raised NameError/ValueError instead of ever returning True.)
    octets = [int(field ) for field in lowerCAmelCase_.split('''.''' ) if field.isdigit()]
    return len(octets ) == 4 and all(0 <= octet <= 254 for octet in octets )
if __name__ == "__main__":
    # Read an address from stdin and report validity.
    _UpperCAmelCase : List[Any] =input().strip()
    # NOTE(review): `is_ip_va_address_valid`, `ip` and `valid_or_invalid` are not
    # defined under those names in this module (the function above is bound to
    # `lowerCAmelCase`, the input to `_UpperCAmelCase`) — confirm intended names.
    _UpperCAmelCase : str ="""valid""" if is_ip_va_address_valid(ip) else """invalid"""
    print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
import math
import qiskit
def lowerCAmelCase ( input_a = 1 , input_b = 1 , carry_in = 1 )-> "qiskit.result.counts.Counts":
    """Simulate a quantum full adder for the three inputs.

    Each input may be 0, 1 or 2 (2 puts the corresponding qubit into
    superposition via a Hadamard gate). Returns the measurement counts of the
    (sum, carry) qubits over 1000 shots on the Aer simulator.

    Raises:
        TypeError: if any input is a string.
        ValueError: if any input is negative, non-integral, or greater than 2.

    NOTE(review): the original signature repeated a single parameter name
    (a SyntaxError) and type-checked with ``isinstance(x, x)``; the parameter
    names are restored from the body's own references (``input_a``/``carry_in``).
    """
    # Reject strings explicitly; the numeric checks below handle the rest.
    if (
        isinstance(input_a , str )
        or isinstance(input_b , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('''inputs must be integers.''' )
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''' )
    if (
        (math.floor(input_a ) != input_a)
        or (math.floor(input_b ) != input_b)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''' )
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''' )
    # build registers: 4 qubits (3 inputs + 1 ancilla), 2 classical bits (sum, carry)
    qr = qiskit.QuantumRegister(4 , '''qr''' )
    cr = qiskit.ClassicalRegister(2 , '''cr''' )
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr )  # measure the last two qbits
    backend = qiskit.Aer.get_backend('''aer_simulator''' )
    job = qiskit.execute(quantum_circuit , backend , shots=1_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
    # NOTE(review): `quantum_full_adder` is not defined in this module (the
    # function above is bound to `lowerCAmelCase`) — confirm the entry point.
    print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class snake_case__:
    """Builds tiny MPNet configs and dummy inputs, and checks output shapes for
    the base model and each task head.

    NOTE(review): every helper method is bound to the single name ``lowercase_``
    (later defs shadow earlier ones), locals repeatedly rebind
    ``lowerCAmelCase_``, and several ``def`` lines repeat the parameter name
    ``__lowercase`` — mechanical renaming damage. The sibling test class calls
    ``create_and_check_*`` methods that do not exist under those names here;
    confirm against the upstream test suite.
    """
    def __init__( self , __lowercase , __lowercase=1_3 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=False , __lowercase=True , __lowercase=9_9 , __lowercase=6_4 , __lowercase=5 , __lowercase=4 , __lowercase=6_4 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_1_2 , __lowercase=1_6 , __lowercase=2 , __lowercase=0.02 , __lowercase=3 , __lowercase=4 , __lowercase=None , ) -> Optional[int]:
        # Store the test hyper-parameters on the tester instance.
        lowerCAmelCase_ : str = parent
        lowerCAmelCase_ : List[Any] = batch_size
        lowerCAmelCase_ : Any = seq_length
        lowerCAmelCase_ : int = is_training
        lowerCAmelCase_ : str = use_input_mask
        lowerCAmelCase_ : str = use_token_type_ids
        lowerCAmelCase_ : List[Any] = use_labels
        lowerCAmelCase_ : Tuple = vocab_size
        lowerCAmelCase_ : Union[str, Any] = hidden_size
        lowerCAmelCase_ : int = num_hidden_layers
        lowerCAmelCase_ : int = num_attention_heads
        lowerCAmelCase_ : List[Any] = intermediate_size
        lowerCAmelCase_ : List[Any] = hidden_act
        lowerCAmelCase_ : str = hidden_dropout_prob
        lowerCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
        lowerCAmelCase_ : List[Any] = max_position_embeddings
        lowerCAmelCase_ : str = type_vocab_size
        lowerCAmelCase_ : str = type_sequence_label_size
        lowerCAmelCase_ : Union[str, Any] = initializer_range
        lowerCAmelCase_ : List[str] = num_labels
        lowerCAmelCase_ : List[str] = num_choices
        lowerCAmelCase_ : int = scope
    # Fetch the reference config of the public checkpoint.
    def lowercase_ ( self ) -> Dict:
        return MPNetConfig.from_pretrained('''microsoft/mpnet-base''' )
    # Build random ids/masks/labels plus a config for one forward pass.
    def lowercase_ ( self ) -> Any:
        lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCAmelCase_ : int = None
        if self.use_input_mask:
            lowerCAmelCase_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCAmelCase_ : Optional[Any] = None
        lowerCAmelCase_ : Optional[Any] = None
        lowerCAmelCase_ : Union[str, Any] = None
        if self.use_labels:
            lowerCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCAmelCase_ : str = ids_tensor([self.batch_size] , self.num_choices )
        lowerCAmelCase_ : Any = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    # Assemble a tiny MPNetConfig from the stored hyper-parameters.
    def lowercase_ ( self ) -> int:
        return MPNetConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    # Base model: last_hidden_state and pooler_output shape checks.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[int]:
        lowerCAmelCase_ : Dict = MPNetModel(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        lowerCAmelCase_ : Tuple = model(__lowercase , __lowercase )
        lowerCAmelCase_ : Union[str, Any] = model(__lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    # Question-answering head: start/end logits shapes.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> List[Any]:
        lowerCAmelCase_ : Tuple = MPNetForQuestionAnswering(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        lowerCAmelCase_ : Optional[Any] = model(
            __lowercase , attention_mask=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    # Sequence-classification head: logits shape.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> List[Any]:
        lowerCAmelCase_ : Tuple = self.num_labels
        lowerCAmelCase_ : List[Any] = MPNetForSequenceClassification(__lowercase )
        model.to(__lowercase )
        model.eval()
        lowerCAmelCase_ : int = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    # Multiple-choice head: inputs are tiled over the choice dimension.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> str:
        lowerCAmelCase_ : Any = self.num_choices
        lowerCAmelCase_ : List[Any] = MPNetForMultipleChoice(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        lowerCAmelCase_ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCAmelCase_ : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCAmelCase_ : Dict = model(
            __lowercase , attention_mask=__lowercase , labels=__lowercase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    # Token-classification head: per-token logits shape.
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> int:
        lowerCAmelCase_ : Optional[Any] = self.num_labels
        lowerCAmelCase_ : Tuple = MPNetForTokenClassification(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        lowerCAmelCase_ : Union[str, Any] = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    # Repack prepare_config_and_inputs() into the (config, inputs_dict) shape
    # the common tests expect.
    def lowercase_ ( self ) -> Union[str, Any]:
        lowerCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
        ((lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_)) : int = config_and_inputs
        lowerCAmelCase_ : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    """Common-test harness wiring for MPNet: declares the model classes and
    pipeline tags to exercise, and delegates each check to the model tester.

    NOTE(review): test methods are all bound to ``lowercase_`` and reference
    ``MPNetModelTester`` / ``create_and_check_*`` names that are not defined in
    this file as written — mechanical renaming damage; confirm upstream.
    """
    # All MPNet model classes covered by the common tests (torch-only).
    SCREAMING_SNAKE_CASE__ : Dict = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping for the pipeline mixin.
    SCREAMING_SNAKE_CASE__ : int = (
        {
            """feature-extraction""": MPNetModel,
            """fill-mask""": MPNetForMaskedLM,
            """question-answering""": MPNetForQuestionAnswering,
            """text-classification""": MPNetForSequenceClassification,
            """token-classification""": MPNetForTokenClassification,
            """zero-shot""": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ : str = False
    SCREAMING_SNAKE_CASE__ : Tuple = True
    # Build the shared model tester and config tester.
    def lowercase_ ( self ) -> Optional[Any]:
        lowerCAmelCase_ : List[str] = MPNetModelTester(self )
        lowerCAmelCase_ : Dict = ConfigTester(self , config_class=__lowercase , hidden_size=3_7 )
    # Run the generic configuration checks.
    def lowercase_ ( self ) -> List[Any]:
        self.config_tester.run_common_tests()
    def lowercase_ ( self ) -> Union[str, Any]:
        lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*__lowercase )
    def lowercase_ ( self ) -> Dict:
        lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*__lowercase )
    def lowercase_ ( self ) -> int:
        lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*__lowercase )
    def lowercase_ ( self ) -> int:
        lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*__lowercase )
    def lowercase_ ( self ) -> Union[str, Any]:
        lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*__lowercase )
@require_torch
class snake_case__( unittest.TestCase ):
    """Slow integration check: the pretrained ``microsoft/mpnet-base`` encoder
    must reproduce recorded hidden-state values for a fixed input sequence.
    """

    @slow
    def lowercase_ ( self ) -> Union[str, Any]:
        """Load the checkpoint, run one forward pass, check shape and a 3x3 slice."""
        # The original body assigned every intermediate to the same throwaway
        # name and then read undefined names (``model``, ``output``,
        # ``__lowercase``), raising NameError; restore coherent locals.
        model = MPNetModel.from_pretrained('''microsoft/mpnet-base''' )
        input_ids = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 1_1, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.05_50, 0.19_43, -0.07_40], [-0.05_62, 0.22_11, -0.05_79], [-0.04_37, 0.33_37, -0.06_41]]] )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
import re
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
    """Return True when the argument is a valid Indian mobile number.

    Accepts an optional '+91' prefix (followed by '-' or a space), an optional
    leading '0' and/or '91', then a 10-digit number starting with 7, 8 or 9.
    """
    pattern = re.compile(r'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
    found = pattern.search(lowerCAmelCase_ )
    if found is None:
        return False
    # Same contract as the walrus form: an anchored search that succeeded on
    # the input string means the whole string is the match.
    return found.string == lowerCAmelCase_
if __name__ == "__main__":
    # NOTE(review): `indian_phone_validator` is not defined in this module (the
    # function above is bound to `lowerCAmelCase`) — confirm the intended name.
    print(indian_phone_validator("""+918827897895"""))
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
# Characters a decoded byte may take for a plaintext to be considered valid.
_UpperCAmelCase : str =(
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
# Candidate key bytes: ordinals of the lowercase alphabet.
_UpperCAmelCase : list[int] =[ord(letter) for letter in string.ascii_lowercase]
# NOTE(review): `VALID_CHARS` is not defined under that name here (the constant
# above is bound to `_UpperCAmelCase`) — as written this raises NameError on
# import; confirm the intended constant names upstream.
_UpperCAmelCase : set[int] ={ord(char) for char in VALID_CHARS}
# Frequent English words used to single out the true plaintext.
_UpperCAmelCase : list[str] =["the", "be", "to", "of", "and", "in", "that", "have"]
def lowerCAmelCase ( key , ciphertext )-> str | None:
    """XOR-decrypt *ciphertext* with the repeating 3-byte *key*.

    Returns the decoded string, or None as soon as any decoded byte falls
    outside the printable set ``VALID_INTS``.

    NOTE(review): the original signature repeated one parameter name (a
    SyntaxError) and read undefined locals (``decodedchar``, ``decoded``);
    names are restored from the body's own references.
    """
    decoded = ""
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            # Early exit: not a plausible plaintext under this key.
            return None
        decoded += chr(decodedchar )
    return decoded
def lowerCAmelCase ( lowerCAmelCase_ )-> list[str]:
    """Try every 3-byte key and collect the candidate decryptions.

    NOTE(review): the key alphabet is drawn from the argument itself via
    ``product(lowerCAmelCase_, repeat=3)`` and the helper is called as
    ``try_key``, which is not defined under that name in this module — both
    look like renaming damage (keys should come from the lowercase-alphabet
    constant); confirm against the original. The original body also read
    undefined locals (``encoded``, ``possibles``), fixed here.
    """
    possibles: list[str] = []
    for key in product(lowerCAmelCase_ , repeat=3 ):
        encoded = try_key(key , lowerCAmelCase_ )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def lowerCAmelCase ( possibles , common_word )-> list[str]:
    """Keep only the candidate plaintexts containing *common_word*
    (case-insensitive on the candidate side).

    The original signature repeated a single parameter name (a SyntaxError);
    the names are restored from the body, which already referenced
    ``possibles`` and ``common_word``.
    """
    return [possible for possible in possibles if common_word in possible.lower()]
def lowerCAmelCase ( lowerCAmelCase_ = "p059_cipher.txt" )-> int:
    """Project Euler 59: decrypt the XOR-encrypted resource file and return the
    sum of the ASCII values of the decrypted text.

    NOTE(review): as written the body reads names that are not defined in this
    module (``data``, ``filter_valid_chars``, ``filter_common_word``,
    ``COMMON_WORDS``, ``possibles``, ``decoded_text``) and joins the filename
    onto its own parent instead of resolving it relative to ``__file__`` —
    presumably renaming damage; confirm against the original solution.
    """
    lowerCAmelCase_ : list[int]
    lowerCAmelCase_ : list[str]
    lowerCAmelCase_ : str
    lowerCAmelCase_ : str
    # Load the comma-separated cipher bytes.
    lowerCAmelCase_ : str = Path(lowerCAmelCase_ ).parent.joinpath(lowerCAmelCase_ ).read_text(encoding='''utf-8''' )
    lowerCAmelCase_ : Tuple = [int(lowerCAmelCase_ ) for number in data.strip().split(''',''' )]
    lowerCAmelCase_ : List[Any] = filter_valid_chars(lowerCAmelCase_ )
    # Narrow the candidates word by word until a single plaintext remains.
    for common_word in COMMON_WORDS:
        lowerCAmelCase_ : str = filter_common_word(lowerCAmelCase_ , lowerCAmelCase_ )
        if len(lowerCAmelCase_ ) == 1:
            break
    lowerCAmelCase_ : Optional[Any] = possibles[0]
    return sum(ord(lowerCAmelCase_ ) for char in decoded_text )
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under that name above (the
    # function is bound to `lowerCAmelCase`) — confirm the entry point.
    print(f"""{solution() = }""")
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Pillow is only needed for the image objects handled below.
if is_vision_available():
    import PIL
# Module-level logger.
_UpperCAmelCase : Any =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
    """ConvNeXt-style image processor: optional shortest-edge resize (with a
    crop-percentage path below 384px), rescale to [0, 1], and mean/std
    normalization.

    NOTE(review): every ``def`` below repeats the parameter name ``__lowercase``
    (a SyntaxError as written) and the bodies read the original parameter
    names (``size``, ``do_resize``, ``image`` ...) that are no longer bound —
    mechanical renaming damage; confirm against the upstream processor.
    """
    SCREAMING_SNAKE_CASE__ : Dict = ["""pixel_values"""]
    def __init__( self , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = 1 / 2_5_5 , __lowercase = True , __lowercase = None , __lowercase = None , **__lowercase , ) -> None:
        super().__init__(**__lowercase )
        # Default: shortest edge 384 (the "warp, no crop" regime).
        lowerCAmelCase_ : Dict = size if size is not None else {'''shortest_edge''': 3_8_4}
        lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
        lowerCAmelCase_ : List[Any] = do_resize
        lowerCAmelCase_ : Optional[int] = size
        # Default value set here for backwards compatibility where the value in config is None
        lowerCAmelCase_ : str = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
        lowerCAmelCase_ : Tuple = resample
        lowerCAmelCase_ : Optional[int] = do_rescale
        lowerCAmelCase_ : Any = rescale_factor
        lowerCAmelCase_ : List[str] = do_normalize
        # Fall back to the ImageNet statistics when none are provided.
        lowerCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        lowerCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = PILImageResampling.BICUBIC , __lowercase = None , **__lowercase , ) -> np.ndarray:
        """Resize following the ConvNeXt recipe: below 384px, resize the shortest
        edge to ``shortest_edge / crop_pct`` then center-crop; at 384px or more,
        warp directly to a square of ``shortest_edge``.
        """
        lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
        if "shortest_edge" not in size:
            raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
        lowerCAmelCase_ : Optional[int] = size['''shortest_edge''']
        if shortest_edge < 3_8_4:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            lowerCAmelCase_ : Optional[Any] = int(shortest_edge / crop_pct )
            lowerCAmelCase_ : List[str] = get_resize_output_image_size(__lowercase , size=__lowercase , default_to_square=__lowercase )
            lowerCAmelCase_ : List[Any] = resize(image=__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=__lowercase , size=(shortest_edge, shortest_edge) , data_format=__lowercase , **__lowercase )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                __lowercase , size=(shortest_edge, shortest_edge) , resample=__lowercase , data_format=__lowercase , **__lowercase )
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> Any:
        """Multiply pixel values by the rescale factor (typically 1/255)."""
        return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
    def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> PIL.Image.Image:
        """Full preprocessing pipeline: validate arguments, then apply
        resize -> rescale -> normalize to every image and batch the result.
        """
        # Per-call overrides fall back to the instance defaults.
        lowerCAmelCase_ : Optional[int] = do_resize if do_resize is not None else self.do_resize
        lowerCAmelCase_ : Any = crop_pct if crop_pct is not None else self.crop_pct
        lowerCAmelCase_ : str = resample if resample is not None else self.resample
        lowerCAmelCase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
        lowerCAmelCase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowerCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize
        lowerCAmelCase_ : str = image_mean if image_mean is not None else self.image_mean
        lowerCAmelCase_ : int = image_std if image_std is not None else self.image_std
        lowerCAmelCase_ : int = size if size is not None else self.size
        lowerCAmelCase_ : List[str] = get_size_dict(__lowercase , default_to_square=__lowercase )
        lowerCAmelCase_ : Tuple = make_list_of_images(__lowercase )
        if not valid_images(__lowercase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # NOTE(review): this groups as `(do_resize and size is None) or
        # (resample is None)` — matches the upstream quirk; confirm intent.
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
            raise ValueError('''crop_pct must be specified if size < 384.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        lowerCAmelCase_ : Optional[Any] = [to_numpy_array(__lowercase ) for image in images]
        if do_resize:
            lowerCAmelCase_ : Union[str, Any] = [self.resize(image=__lowercase , size=__lowercase , crop_pct=__lowercase , resample=__lowercase ) for image in images]
        if do_rescale:
            lowerCAmelCase_ : Any = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
        if do_normalize:
            lowerCAmelCase_ : List[Any] = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
        # Put channels into the requested layout (channels-first by default).
        lowerCAmelCase_ : Optional[Any] = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
        lowerCAmelCase_ : Dict = {'''pixel_values''': images}
        return BatchFeature(data=__lowercase , tensor_type=__lowercase )
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def lowerCAmelCase ( method ):
    """Decorator for model ``forward``-style methods: when accelerate hooks are
    installed on the module, fire ``_hf_hook.pre_forward`` before delegating.

    Returns the method unchanged when accelerate is missing or older than
    0.17.0 (the first version with pre-forward hooks).

    NOTE(review): the original body read an undefined name ``method`` (its
    parameter was bound to a different identifier) and declared the wrapper as
    ``def wrapper(self, *x, **x)`` — a duplicate-argument SyntaxError.
    """
    import functools  # local import: keeps the module's top-level imports untouched

    if not is_accelerate_available():
        return method

    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse('''0.17.0''' ):
        return method

    @functools.wraps(method )  # preserve the wrapped method's name/docstring
    def wrapper(self , *args , **kwargs ):
        # Trigger the accelerate offload hook, if one is attached to the module.
        if hasattr(self , '''_hf_hook''' ) and hasattr(self._hf_hook , '''pre_forward''' ):
            self._hf_hook.pre_forward(self )
        return method(self , *args , **kwargs )

    return wrapper
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Optional[int] =logging.get_logger(__name__)
# Map of public checkpoint name -> hosted config file. NOTE(review): both module
# constants are bound to the same name `_UpperCAmelCase`, so the logger binding
# is overwritten — presumably renaming damage; confirm intended names.
_UpperCAmelCase : Union[str, Any] ={
    """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class snake_case__( UpperCAmelCase__ ):
    """Configuration class for GPT-NeoX Japanese models (defaults match the
    2.7B checkpoint).

    NOTE(review): ``__init__`` repeats the parameter name ``__lowercase``
    (a SyntaxError as written) and the body assigns from undefined names
    (``vocab_size`` etc.) into a single rebound local instead of instance
    attributes — mechanical renaming damage; confirm against the upstream
    configuration class.
    """
    SCREAMING_SNAKE_CASE__ : str = """gpt_neox_japanese"""
    def __init__( self , __lowercase=3_2_0_0_0 , __lowercase=2_5_6_0 , __lowercase=3_2 , __lowercase=3_2 , __lowercase=4 , __lowercase="gelu" , __lowercase=1.00 , __lowercase=1_0_0_0_0 , __lowercase=2_0_4_8 , __lowercase=0.02 , __lowercase=1e-5 , __lowercase=True , __lowercase=3_1_9_9_6 , __lowercase=3_1_9_9_9 , __lowercase=0.1 , __lowercase=0.0 , **__lowercase , ) -> str:
        super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
        lowerCAmelCase_ : Optional[Any] = vocab_size
        lowerCAmelCase_ : Tuple = max_position_embeddings
        lowerCAmelCase_ : Optional[Any] = hidden_size
        lowerCAmelCase_ : Optional[Any] = num_hidden_layers
        lowerCAmelCase_ : str = num_attention_heads
        lowerCAmelCase_ : str = intermediate_multiple_size
        lowerCAmelCase_ : str = hidden_act
        # Rotary-embedding settings (fraction of head dims rotated, and base).
        lowerCAmelCase_ : Dict = rotary_pct
        lowerCAmelCase_ : Union[str, Any] = rotary_emb_base
        lowerCAmelCase_ : int = initializer_range
        lowerCAmelCase_ : Any = layer_norm_eps
        lowerCAmelCase_ : Optional[Any] = use_cache
        lowerCAmelCase_ : Tuple = attention_dropout
        lowerCAmelCase_ : Dict = hidden_dropout
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self , __lowercase , __lowercase ) -> Tuple:
return f"""gaussian_noise_s={seed}_shape={"_".join([str(__lowercase ) for s in shape] )}.npy"""
    def lowercase_ ( self ) -> int:
        """Per-test cleanup: drop references and force a garbage-collection pass.

        NOTE(review): not named ``tearDown``, so unittest will not call it
        automatically — confirm the intended hook name.
        """
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def lowercase_ ( self , __lowercase=0 , __lowercase=(4, 4, 6_4, 6_4) , __lowercase=False ) -> Union[str, Any]:
        """Load a cached gaussian-noise latent fixture as a jnp array.

        NOTE(review): the signature repeats the parameter name ``__lowercase``
        (a SyntaxError as written) and the body reads undefined names
        (``fpaa``, ``image``) — presumably renaming damage for
        (seed, shape, fp16 flag); confirm against the original.
        """
        lowerCAmelCase_ : Union[str, Any] = jnp.bfloataa if fpaa else jnp.floataa
        lowerCAmelCase_ : List[str] = jnp.array(load_hf_numpy(self.get_file_format(__lowercase , __lowercase ) ) , dtype=__lowercase )
        return image
def lowercase_ ( self , __lowercase=False , __lowercase="CompVis/stable-diffusion-v1-4" ) -> Union[str, Any]:
lowerCAmelCase_ : Tuple = jnp.bfloataa if fpaa else jnp.floataa
lowerCAmelCase_ : Optional[int] = '''bf16''' if fpaa else None
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = FlaxUNetaDConditionModel.from_pretrained(
__lowercase , subfolder='''unet''' , dtype=__lowercase , revision=__lowercase )
return model, params
def lowercase_ ( self , __lowercase=0 , __lowercase=(4, 7_7, 7_6_8) , __lowercase=False ) -> Union[str, Any]:
lowerCAmelCase_ : Union[str, Any] = jnp.bfloataa if fpaa else jnp.floataa
lowerCAmelCase_ : List[str] = jnp.array(load_hf_numpy(self.get_file_format(__lowercase , __lowercase ) ) , dtype=__lowercase )
return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]],
[1_7, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]],
[8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]],
[3, 1_0_0_0, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]],
# fmt: on
] )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> Any:
lowerCAmelCase_ , lowerCAmelCase_ : Dict = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__lowercase )
lowerCAmelCase_ : str = self.get_latents(__lowercase , fpaa=__lowercase )
lowerCAmelCase_ : Optional[Any] = self.get_encoder_hidden_states(__lowercase , fpaa=__lowercase )
lowerCAmelCase_ : List[Any] = model.apply(
{'''params''': params} , __lowercase , jnp.array(__lowercase , dtype=jnp.intaa ) , encoder_hidden_states=__lowercase , ).sample
assert sample.shape == latents.shape
lowerCAmelCase_ : Optional[int] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
lowerCAmelCase_ : str = jnp.array(__lowercase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(__lowercase , __lowercase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]],
[1_7, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, -0.14_58]],
[8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]],
[3, 1_0_0_0, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, -0.05_39]],
# fmt: on
] )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase ) -> Any:
lowerCAmelCase_ , lowerCAmelCase_ : Dict = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__lowercase )
lowerCAmelCase_ : Optional[Any] = self.get_latents(__lowercase , shape=(4, 4, 9_6, 9_6) , fpaa=__lowercase )
lowerCAmelCase_ : List[str] = self.get_encoder_hidden_states(__lowercase , shape=(4, 7_7, 1_0_2_4) , fpaa=__lowercase )
lowerCAmelCase_ : List[Any] = model.apply(
{'''params''': params} , __lowercase , jnp.array(__lowercase , dtype=jnp.intaa ) , encoder_hidden_states=__lowercase , ).sample
assert sample.shape == latents.shape
lowerCAmelCase_ : List[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
lowerCAmelCase_ : Tuple = jnp.array(__lowercase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__lowercase , __lowercase , atol=1e-2 ) | 619 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class snake_case__:
    '''Test harness that builds tiny MaskFormer configs and random inputs.

    Produces a small Swin-backbone + DETR-decoder MaskFormer configuration,
    random pixel values / masks / labels, and shape checks for both the base
    model and the instance-segmentation head.

    NOTE(review): every method is named ``lowercase_`` and every parameter
    ``__lowercase`` (duplicate parameter names are invalid Python); bodies
    read names such as ``parent``/``batch_size`` that are never bound —
    restore the upstream names before running.
    '''
    def __init__( self , __lowercase , __lowercase=2 , __lowercase=True , __lowercase=False , __lowercase=1_0 , __lowercase=3 , __lowercase=3_2 * 4 , __lowercase=3_2 * 6 , __lowercase=4 , __lowercase=3_2 , ) -> Union[str, Any]:
        # NOTE(review): parameters are presumably (parent, batch_size,
        # is_training, use_auxiliary_loss, num_queries, num_channels,
        # min_size, max_size, num_labels, mask_feature_size) — confirm.
        lowerCAmelCase_ : str = parent
        lowerCAmelCase_ : Optional[Any] = batch_size
        lowerCAmelCase_ : List[Any] = is_training
        lowerCAmelCase_ : Optional[Any] = use_auxiliary_loss
        lowerCAmelCase_ : List[Any] = num_queries
        lowerCAmelCase_ : str = num_channels
        lowerCAmelCase_ : Dict = min_size
        lowerCAmelCase_ : List[str] = max_size
        lowerCAmelCase_ : Any = num_labels
        lowerCAmelCase_ : str = mask_feature_size
    def lowercase_ ( self ) -> List[Any]:
        # Build random pixel values, an all-ones pixel mask, random binary
        # mask labels and random class labels on the target device.
        lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            __lowercase )
        lowerCAmelCase_ : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowercase )
        lowerCAmelCase_ : Union[str, Any] = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowercase ) > 0.5
        ).float()
        lowerCAmelCase_ : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=__lowercase ) > 0.5).long()
        lowerCAmelCase_ : Dict = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def lowercase_ ( self ) -> List[str]:
        # Minimal config: 4 one-block Swin stages and a small DETR decoder.
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
    def lowercase_ ( self ) -> Union[str, Any]:
        # Convenience wrapper returning (config, inputs_dict) for common tests.
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = self.prepare_config_and_inputs()
        lowerCAmelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
        return config, inputs_dict
    def lowercase_ ( self , __lowercase , __lowercase ) -> Any:
        # Verify the hidden-state tuples have one entry per layer/stage.
        lowerCAmelCase_ : Optional[int] = output.encoder_hidden_states
        lowerCAmelCase_ : List[Any] = output.pixel_decoder_hidden_states
        lowerCAmelCase_ : Optional[Any] = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowercase ) , config.decoder_config.decoder_layers )
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase=False ) -> int:
        # Run the base model in eval mode and check output shapes/presence.
        with torch.no_grad():
            lowerCAmelCase_ : List[Any] = MaskFormerModel(config=__lowercase )
            model.to(__lowercase )
            model.eval()
            lowerCAmelCase_ : Optional[Any] = model(pixel_values=__lowercase , pixel_mask=__lowercase )
            lowerCAmelCase_ : Optional[int] = model(__lowercase , output_hidden_states=__lowercase )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(__lowercase , __lowercase )
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
        # Exercise the instance-segmentation head with and without labels.
        lowerCAmelCase_ : Any = MaskFormerForInstanceSegmentation(config=__lowercase )
        model.to(__lowercase )
        model.eval()
        def comm_check_on_output(__lowercase ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            lowerCAmelCase_ : int = model(pixel_values=__lowercase , pixel_mask=__lowercase )
            lowerCAmelCase_ : Any = model(__lowercase )
        comm_check_on_output(__lowercase )
        lowerCAmelCase_ : List[Any] = model(
            pixel_values=__lowercase , pixel_mask=__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
        comm_check_on_output(__lowercase )
        # With labels supplied, a scalar loss must be produced.
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    '''Common model tests for MaskFormer (base model and segmentation head).

    NOTE(review): the six class attributes below are all mangled to the same
    name ``SCREAMING_SNAKE_CASE__`` (each rebinding clobbers the previous),
    and every test method is named ``lowercase_`` (later defs shadow earlier
    ones) — upstream these are ``all_model_classes``,
    ``pipeline_model_mapping``, the ``test_*``/``is_encoder_decoder`` flags
    and individually named ``test_*`` methods. Restore before running.
    '''
    SCREAMING_SNAKE_CASE__ : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE__ : Tuple = (
        {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : Dict = False
    SCREAMING_SNAKE_CASE__ : Tuple = False
    SCREAMING_SNAKE_CASE__ : List[str] = False
    def lowercase_ ( self ) -> List[Any]:
        # Set up the model tester and a config tester (no text modality).
        lowerCAmelCase_ : Any = MaskFormerModelTester(self )
        lowerCAmelCase_ : str = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
    def lowercase_ ( self ) -> Any:
        self.config_tester.run_common_tests()
    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ , lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )
    def lowercase_ ( self ) -> Optional[Any]:
        lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowercase )
    @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
    def lowercase_ ( self ) -> str:
        pass
    @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass
    @unittest.skip(reason='''MaskFormer is not a generative model''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass
    @unittest.skip(reason='''MaskFormer does not use token embeddings''' )
    def lowercase_ ( self ) -> Union[str, Any]:
        pass
    @require_torch_multi_gpu
    @unittest.skip(
        reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def lowercase_ ( self ) -> Optional[Any]:
        pass
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def lowercase_ ( self ) -> Dict:
        pass
    def lowercase_ ( self ) -> List[str]:
        # The forward signature must start with ``pixel_values``.
        lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : Tuple = model_class(__lowercase )
            lowerCAmelCase_ : Dict = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase_ : str = [*signature.parameters.keys()]
            lowerCAmelCase_ : Tuple = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __lowercase )
    @slow
    def lowercase_ ( self ) -> Optional[int]:
        # Smoke-test loading the published checkpoint from the Hub.
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            lowerCAmelCase_ : str = MaskFormerModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )
    def lowercase_ ( self ) -> List[Any]:
        # Training with random labels must produce a loss.
        lowerCAmelCase_ : Tuple = (self.model_tester.min_size,) * 2
        lowerCAmelCase_ : List[Any] = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=__lowercase ),
            '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowercase ),
            '''class_labels''': torch.zeros(2 , 1_0 , device=__lowercase ).long(),
        }
        lowerCAmelCase_ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowercase )
        lowerCAmelCase_ : Dict = model(**__lowercase )
        self.assertTrue(outputs.loss is not None )
    def lowercase_ ( self ) -> Dict:
        lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )
    def lowercase_ ( self ) -> int:
        # Attention weights must be returned when requested.
        lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : List[str] = model_class(__lowercase ).to(__lowercase )
            lowerCAmelCase_ : int = model(**__lowercase , output_attentions=__lowercase )
            self.assertTrue(outputs.attentions is not None )
    def lowercase_ ( self ) -> List[str]:
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase_ : int = self.all_model_classes[1]
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ : Optional[Any] = model_class(__lowercase )
        model.to(__lowercase )
        model.train()
        # A backward pass through the loss must not raise.
        lowerCAmelCase_ : Optional[Any] = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ).loss
        loss.backward()
    def lowercase_ ( self ) -> Optional[int]:
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase_ : Any = self.all_model_classes[1]
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ : Tuple = True
        lowerCAmelCase_ : Tuple = True
        lowerCAmelCase_ : Any = model_class(__lowercase )
        model.to(__lowercase )
        model.train()
        lowerCAmelCase_ : Any = model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
        # Retain gradients on intermediate activations so we can assert that
        # gradients actually flow back through every sub-module.
        lowerCAmelCase_ : Union[str, Any] = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        lowerCAmelCase_ : str = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        lowerCAmelCase_ : str = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        lowerCAmelCase_ : Union[str, Any] = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=__lowercase )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
# Absolute tolerance used by the torch.allclose checks in the integration
# tests below (upstream name: TOLERANCE).
# NOTE(review): the ``Dict`` annotation is wrong for a float literal.
_UpperCAmelCase : Dict =1E-4
def lowerCAmelCase ( )-> Any:
    """Load the standard COCO cats fixture image used by the integration tests.

    Returns the opened PIL image. Fix: the original body bound the opened
    image to a throwaway name and then returned the undefined name ``image``.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_vision
@slow
class snake_case__( unittest.TestCase ):
    '''Slow MaskFormer integration tests against pretrained Hub checkpoints.

    Each test downloads a checkpoint, runs a real image (or zero tensors)
    through the processor + model, and compares small output slices against
    hard-coded reference values at the module-level tolerance.

    NOTE(review): every method is named ``lowercase_`` (later defs shadow
    earlier ones) and bodies read ``__lowercase`` where upstream passes
    ``torch_device`` / the local expected tensors — restore before running.
    '''
    @cached_property
    def lowercase_ ( self ) -> Union[str, Any]:
        # Default image processor, or None when vision extras are missing.
        return (
            MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            if is_vision_available()
            else None
        )
    def lowercase_ ( self ) -> Any:
        # Base-model forward pass: check hidden-state slices of all three stages.
        lowerCAmelCase_ : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowercase )
        lowerCAmelCase_ : Dict = self.default_image_processor
        lowerCAmelCase_ : int = prepare_img()
        lowerCAmelCase_ : Any = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Any = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : List[str] = model(**__lowercase )
        lowerCAmelCase_ : Union[str, Any] = torch.tensor(
            [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        lowerCAmelCase_ : List[Any] = torch.tensor(
            [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        lowerCAmelCase_ : int = torch.tensor(
            [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) )
    def lowercase_ ( self ) -> Dict:
        # Segmentation-head forward pass on the Swin-small COCO checkpoint:
        # check masks_queries_logits and class_queries_logits slices.
        lowerCAmelCase_ : Optional[Any] = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : Tuple = self.default_image_processor
        lowerCAmelCase_ : Optional[Any] = prepare_img()
        lowerCAmelCase_ : int = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Tuple = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : Dict = model(**__lowercase )
        # masks_queries_logits
        lowerCAmelCase_ : Optional[int] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowerCAmelCase_ : Tuple = [
            [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
            [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
            [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
        ]
        lowerCAmelCase_ : int = torch.tensor(__lowercase ).to(__lowercase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        # class_queries_logits
        lowerCAmelCase_ : List[Any] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCAmelCase_ : Dict = torch.tensor(
            [
                [1.6_512e00, -5.2_572e00, -3.3_519e00],
                [3.6_169e-02, -5.9_025e00, -2.9_313e00],
                [1.0_766e-04, -7.7_630e00, -5.1_263e00],
            ] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )
    def lowercase_ ( self ) -> Optional[Any]:
        # Same check against the ResNet-101 COCO-stuff checkpoint.
        lowerCAmelCase_ : str = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : int = self.default_image_processor
        lowerCAmelCase_ : Optional[Any] = prepare_img()
        lowerCAmelCase_ : Dict = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
        lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            lowerCAmelCase_ : str = model(**__lowercase )
        # masks_queries_logits
        lowerCAmelCase_ : List[str] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        lowerCAmelCase_ : Any = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
        lowerCAmelCase_ : str = torch.tensor(__lowercase ).to(__lowercase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        # class_queries_logits
        lowerCAmelCase_ : Optional[int] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCAmelCase_ : int = torch.tensor(
            [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )
    def lowercase_ ( self ) -> Optional[Any]:
        # Batched zero inputs with segmentation maps: training loss must exist.
        lowerCAmelCase_ : Dict = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(__lowercase )
            .eval()
        )
        lowerCAmelCase_ : str = self.default_image_processor
        lowerCAmelCase_ : Union[str, Any] = image_processor(
            [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
        lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].to(__lowercase )
        lowerCAmelCase_ : int = [el.to(__lowercase ) for el in inputs['''mask_labels''']]
        lowerCAmelCase_ : Optional[Any] = [el.to(__lowercase ) for el in inputs['''class_labels''']]
        with torch.no_grad():
            lowerCAmelCase_ : str = model(**__lowercase )
        self.assertTrue(outputs.loss is not None )
import math
def lowerCAmelCase ( x , a )-> float:
    """Return f(x) = x**2 - a, whose positive root is sqrt(a).

    Fix: the original declared two parameters with the same mangled name
    (a SyntaxError) while the body read the unbound name ``a``; the real
    parameters are (x, a).
    """
    return math.pow(x , 2 ) - a
def lowerCAmelCase ( x )-> float:
    """Return f'(x) = 2*x, the derivative of x**2 - a with respect to x.

    Fix: the body read the unbound name ``x``; the parameter is ``x``.
    """
    return 2 * x
def lowerCAmelCase ( a )-> float:
    """Return a starting point >= sqrt(a) for Newton iteration.

    Repeatedly squares 2.0 until it exceeds ``a``. Fixes two defects in the
    original: the body read the unbound name ``a`` (parameter was mangled),
    and the loop assigned the squared value to a throwaway name instead of
    ``start``, making the loop infinite for a >= 2.
    """
    start = 2.0
    while start <= a:
        # Squaring overshoots quickly: 2, 4, 16, 256, ...
        start = math.pow(start , 2 )
    return start
def lowerCAmelCase ( a , max_iter = 9_999 , tolerance = 0.00000000000001 )-> float:
    """Compute sqrt(a) by the Newton-Raphson method on f(x) = x**2 - a.

    a: non-negative number to take the square root of.
    max_iter: iteration cap (returns the current estimate when reached).
    tolerance: stop once successive estimates differ by less than this.
    Raises ValueError for negative ``a``.

    Fixes: the original signature declared duplicate mangled parameter names
    and the body called ``get_initial_point``/``fx``/``fx_derivative``, which
    are undefined in this module (the sibling definitions all share one
    mangled name). Private helpers keep the block self-contained.
    """
    if a < 0:
        raise ValueError('''math domain error''' )

    def _fx(x, a):
        # f(x) = x**2 - a
        return math.pow(x, 2) - a

    def _fx_derivative(x):
        # f'(x) = 2x
        return 2 * x

    def _get_initial_point(a):
        # Repeated squaring of 2.0 yields a start point >= sqrt(a).
        start = 2.0
        while start <= a:
            start = math.pow(start, 2)
        return start

    value = _get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        # Newton step: x <- x - f(x)/f'(x)
        value = value - _fx(value, a) / _fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
    # Self-test hook: run any doctests defined in this module.
    from doctest import testmod
    testmod()
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase__ )
class snake_case__( UpperCAmelCase__ ):
    '''Task template describing automatic speech recognition datasets
    (audio input column mapped to a transcription label column).

    NOTE(review): all five dataclass fields below are mangled to the single
    name ``SCREAMING_SNAKE_CASE__`` (each rebinding clobbers the previous —
    upstream: task, input_schema, label_schema, audio_column,
    transcription_column), and the method and the property share the name
    ``lowercase_`` (upstream: align_with_features, column_mapping). Also
    ``frozen=UpperCAmelCase__`` should presumably be ``frozen=True``.
    Restore before use.
    '''
    SCREAMING_SNAKE_CASE__ : str = field(default="""automatic-speech-recognition""", metadata={"""include_in_asdict_even_if_is_default""": True} )
    SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""audio""": Audio()} )
    SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""transcription""": Value("""string""" )} )
    SCREAMING_SNAKE_CASE__ : str = "audio"
    SCREAMING_SNAKE_CASE__ : str = "transcription"
    def lowercase_ ( self , __lowercase ) -> int:
        # Validate that the dataset features contain the audio column with an
        # Audio type, then return a copy of this template whose input schema
        # uses the dataset's actual audio feature.
        # NOTE(review): the body reads ``features``/``input_schema``/
        # ``task_template`` which are never bound here — the parameter is
        # presumably ``features`` and the locals were mangled; confirm.
        if self.audio_column not in features:
            raise ValueError(f"""Column {self.audio_column} is not present in features.""" )
        if not isinstance(features[self.audio_column] , __lowercase ):
            raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" )
        lowerCAmelCase_ : List[str] = copy.deepcopy(self )
        lowerCAmelCase_ : Optional[Any] = self.input_schema.copy()
        lowerCAmelCase_ : Optional[Any] = features[self.audio_column]
        lowerCAmelCase_ : List[str] = input_schema
        return task_template
    @property
    def lowercase_ ( self ) -> Dict[str, str]:
        # Map dataset column names to the canonical task column names.
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
from __future__ import annotations
import pandas as pd
def lowerCAmelCase ( arrival_time , burst_time , no_of_processes )-> list[int]:
    """Compute per-process waiting times under preemptive Shortest Remaining
    Time First (SRTF) scheduling.

    arrival_time: arrival time of each process (time units).
    burst_time: CPU burst length of each process.
    no_of_processes: number of processes (lengths of the two lists).
    Returns a list of waiting times, one per process.

    Fixes: the original declared three identically-named parameters (a
    SyntaxError) and never assigned ``finish_time``; names restored from the
    surviving reads (``arrival_time``, ``burst_time``, ``finar`` etc.).
    """
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999_999_999  # sentinel "infinity" for the running minimum
    short = 0  # index of the process with the shortest remaining time
    check = False  # True once some arrived process is runnable
    # Process until all processes are completed
    while complete != no_of_processes:
        # Pick the arrived, unfinished process with the least remaining time.
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            # Nothing has arrived yet: just advance the clock.
            increment_time += 1
            continue
        # Run the selected process for one time unit.
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def lowerCAmelCase ( burst_time , no_of_processes , waiting_time )-> list[int]:
    """Compute turnaround time per process: burst time + waiting time.

    Parameter order (burst_time, no_of_processes, waiting_time) matches the
    ``calculate_turnaroundtime(bt, n, wt)`` call in the script below.
    Fix: the original declared three identically-named parameters and read
    the unbound names restored here.
    """
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def lowerCAmelCase ( waiting_time , turn_around_time , no_of_processes )-> None:
    """Print the average waiting and turnaround times.

    Side effect only (two print calls); returns None. Fix: the original
    declared three identically-named parameters and read unbound names.
    """
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
    print('''Average turn around time =''' , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
    # Interactive driver: read per-process arrival/burst times, run SRTF,
    # print averages and a summary table.
    # NOTE(review): several names here are mangled/undefined — the
    # ``map(int, input().split())`` result should fill arrival_time[i] and
    # burst_time[i]; ``calculate_waitingtime``/``calculate_turnaroundtime``
    # are not defined under those names (all defs above are ``lowerCAmelCase``);
    # ``bt``/``n``/``wt`` and ``fcfs`` are unbound. Restore before running.
    print("""Enter how many process you want to analyze""")
    _UpperCAmelCase : Any =int(input())
    _UpperCAmelCase : List[Any] =[0] * no_of_processes
    _UpperCAmelCase : str =[0] * no_of_processes
    _UpperCAmelCase : Any =list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
        _UpperCAmelCase , _UpperCAmelCase : Optional[Any] =map(int, input().split())
    _UpperCAmelCase : int =calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    _UpperCAmelCase : Tuple =burst_time
    _UpperCAmelCase : List[str] =no_of_processes
    _UpperCAmelCase : Dict =waiting_time
    _UpperCAmelCase : int =calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    _UpperCAmelCase : Dict =pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            """Process""",
            """BurstTime""",
            """ArrivalTime""",
            """WaitingTime""",
            """TurnAroundTime""",
        ],
    )
    # Printing the dataFrame
    pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
    print(fcfs)
# Frozen parameter sets describing which call arguments each diffusers
# pipeline family accepts (full set) and which are required (batch set).
# NOTE(review): every assignment below rebinds the same mangled name
# ``_UpperCAmelCase``, so only the final value survives import — upstream
# each set has its own name (TEXT_TO_IMAGE_PARAMS, IMAGE_VARIATION_PARAMS,
# TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, UNCONDITIONAL_*_PARAMS, etc.).
# Restore distinct names before use.
# Text-to-image: full parameter set.
_UpperCAmelCase : int =frozenset(
    [
        """prompt""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
# Text-to-image: batch parameters.
_UpperCAmelCase : List[Any] =frozenset(["""prompt""", """negative_prompt"""])
# Unconditional image generation takes no call parameters.
_UpperCAmelCase : Dict =frozenset([])
_UpperCAmelCase : int =frozenset(["""image"""])
# Image variation parameters.
_UpperCAmelCase : Tuple =frozenset(
    [
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : int =frozenset(["""image"""])
# Text-guided image-to-image parameters.
_UpperCAmelCase : str =frozenset(
    [
        """prompt""",
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : int =frozenset(["""prompt""", """image""", """negative_prompt"""])
_UpperCAmelCase : Optional[int] =frozenset(
    [
        # Text guided image variation with an image mask
        """prompt""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)
_UpperCAmelCase : Optional[int] =frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
_UpperCAmelCase : Optional[Any] =frozenset(
    [
        # image variation with an image mask
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Optional[Any] =frozenset(["""image""", """mask_image"""])
# Example-guided inpainting (e.g. paint-by-example).
_UpperCAmelCase : Union[str, Any] =frozenset(
    [
        """example_image""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)
_UpperCAmelCase : Tuple =frozenset(["""example_image""", """image""", """mask_image"""])
# Class-conditioned generation.
_UpperCAmelCase : Any =frozenset(["""class_labels"""])
_UpperCAmelCase : List[Any] =frozenset(["""class_labels"""])
# Unconditional generation driven only by batch size.
_UpperCAmelCase : int =frozenset(["""batch_size"""])
_UpperCAmelCase : str =frozenset([])
_UpperCAmelCase : str =frozenset(["""batch_size"""])
_UpperCAmelCase : Optional[Any] =frozenset([])
# Text-to-audio parameters.
_UpperCAmelCase : Tuple =frozenset(
    [
        """prompt""",
        """audio_length_in_s""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)
_UpperCAmelCase : Tuple =frozenset(["""prompt""", """negative_prompt"""])
# Token-driven (e.g. audio token) pipelines.
_UpperCAmelCase : List[str] =frozenset(["""input_tokens"""])
_UpperCAmelCase : Optional[Any] =frozenset(["""input_tokens"""])
import os
def lowerCAmelCase ( grid=None )-> int:
    """Project Euler 11: greatest product of four adjacent numbers (in any of
    the four directions) in a 20x20 grid.

    grid: optional 20x20 list of int rows; when None (the default, preserving
        the original call signature), the grid is read from ``grid.txt`` next
        to this file.
    Returns the maximum product found.

    Fixes: the original read ``os.path.dirname`` of an unbound mangled name
    (upstream: ``__file__``) and never bound the locals ``l``/``maximum``/
    ``temp`` it read. The optional ``grid`` parameter is a backward-compatible
    generalization that also makes the search testable.
    """
    if grid is None:
        with open(os.path.dirname(__file__) + '''/grid.txt''' ) as f:
            grid = []  # noqa: E741
            for _ in range(20):
                grid.append([int(x) for x in f.readline().split()])
    l = grid  # noqa: E741  (keep the upstream short alias)
    maximum = 0
    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2 (down-left; j >= 3 keeps the indices in range)
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
    # Print the answer when run as a script (reads grid.txt from this directory).
    # NOTE(review): ``solution`` is not defined in this module — the grid
    # search above is bound to ``lowerCAmelCase``; confirm the intended name.
    print(solution())
def lowerCAmelCase ( lowerCAmelCase_ = 1_000_000 )-> int:
    """Project Euler 14: starting number below ``lowerCAmelCase_`` that
    produces the longest Collatz chain.

    Memoizes chain lengths in ``counters`` so each number's tail is walked
    only until a previously-seen value is hit. Returns the starting number
    with the longest chain (1 when the range is empty).

    Fix: the original assigned every local to a throwaway mangled name while
    reading ``counters``/``number``/``inputa`` etc., which were never bound;
    the locals are restored here. The public parameter name is kept.
    """
    largest_number = 1
    pre_counter = 1  # chain length of the current best
    counters = {1: 1}  # memo: number -> chain length
    for inputa in range(2, lowerCAmelCase_):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                # Known tail: add its cached length and stop walking.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
    # Read a limit from stdin and print the answer.
    # NOTE(review): ``solution`` is not defined in this module — the Collatz
    # search above is bound to ``lowerCAmelCase``; confirm the intended name.
    print(solution(int(input().strip())))
from __future__ import annotations
from math import pow, sqrt
def lowerCAmelCase ( resistance , reactance , impedance )-> dict[str, float]:
    """Solve the electrical impedance triangle Z**2 = R**2 + X**2.

    Exactly one of (resistance, reactance, impedance) must be 0; that
    quantity is computed from the other two and returned as a one-entry
    dict keyed by its name. Raises ValueError when the zero-count is not
    exactly one.

    Fix: the original declared three identically-named parameters (a
    SyntaxError) while the body read resistance/reactance/impedance.
    """
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (upstream name: ``logger``), namespaced to this module.
_UpperCAmelCase : str =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
    """Configuration for a composite encoder-decoder model.

    Holds two nested configurations, ``self.encoder`` and ``self.decoder``,
    each rebuilt via ``AutoConfig.for_model`` from the ``model_type`` recorded
    in the corresponding sub-dict.

    NOTE(review): the original block was corrupted by a renaming pass
    (undefined names such as ``kwargs``/``encoder_config``, and a classmethod
    with three identically-named parameters, which is a SyntaxError). The
    logic below is reconstructed from the surviving references; it mirrors the
    upstream ``EncoderDecoderConfig``. The two methods still share the name
    ``lowercase_`` as in the original, so the instance method shadows the
    classmethod — confirm the intended names upstream.
    """

    # Restored from the duplicated SCREAMING_SNAKE_CASE__ attributes, which
    # shadowed each other; these are the names PretrainedConfig machinery reads.
    model_type = """encoder-decoder"""
    is_composition = True

    def __init__( self , **__lowercase ) -> None:
        super().__init__(**__lowercase )
        assert (
            "encoder" in __lowercase and "decoder" in __lowercase
        ), "Config has to be initialized with encoder and decoder config"
        # Split off the two sub-config dicts; each records the concrete
        # config class to instantiate via its "model_type" entry.
        encoder_config = __lowercase.pop('''encoder''' )
        encoder_model_type = encoder_config.pop('''model_type''' )
        decoder_config = __lowercase.pop('''decoder''' )
        decoder_model_type = decoder_config.pop('''model_type''' )

        # Imported locally to avoid a circular import with the auto-config module.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True

    @classmethod
    def lowercase_ ( cls , encoder_config , decoder_config , **kwargs ) -> PretrainedConfig:
        """Build a composite config from two existing sub-configs.

        Marks the decoder side as a decoder with cross-attention before
        serializing both sub-configs into the new instance.
        """
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )

    def lowercase_ ( self ) -> dict:
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__ )
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
# Module-level logger for this example script.
_UpperCAmelCase : Any =logging.getLogger(__name__)
@dataclass
class snake_case__:
    """Data/preprocessing arguments for the XNLI example.

    NOTE(review): an obfuscation pass renamed every field to
    SCREAMING_SNAKE_CASE__ and most defaults to UpperCAmelCase__, so each field
    declaration shadows the previous one and only the last survives. The
    metadata strings still describe the originally intended fields
    (max_seq_length, overwrite_cache, pad_to_max_length,
    max_train/eval/predict_samples) — confirm against upstream run_xnli.py.
    """

    # intended: max_seq_length (int, default 128)
    SCREAMING_SNAKE_CASE__ : Optional[int] = field(
        default=128, metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        }, )
    # intended: overwrite_cache (bool)
    SCREAMING_SNAKE_CASE__ : bool = field(
        default=UpperCAmelCase__, metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
    # intended: pad_to_max_length (bool)
    SCREAMING_SNAKE_CASE__ : bool = field(
        default=UpperCAmelCase__, metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        }, )
    # intended: max_train_samples (Optional[int])
    SCREAMING_SNAKE_CASE__ : Optional[int] = field(
        default=UpperCAmelCase__, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        }, )
    # intended: max_eval_samples (Optional[int])
    SCREAMING_SNAKE_CASE__ : Optional[int] = field(
        default=UpperCAmelCase__, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        }, )
    # intended: max_predict_samples (Optional[int])
    SCREAMING_SNAKE_CASE__ : Optional[int] = field(
        default=UpperCAmelCase__, metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of prediction examples to this """
                """value if set."""
            )
        }, )
@dataclass
class snake_case__:
    """Model/tokenizer arguments for the XNLI example.

    NOTE(review): same corruption pattern as the data-args class above — every
    field is named SCREAMING_SNAKE_CASE__ and shadows its predecessor; the
    metadata identifies the intended fields (model_name_or_path, language,
    train_language, config_name, tokenizer_name, cache_dir, do_lower_case,
    use_fast_tokenizer, model_revision, use_auth_token,
    ignore_mismatched_sizes). Confirm against upstream run_xnli.py.
    """

    # intended: model_name_or_path (str)
    SCREAMING_SNAKE_CASE__ : str = field(
        default=UpperCAmelCase__, metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    # intended: language (str)
    SCREAMING_SNAKE_CASE__ : str = field(
        default=UpperCAmelCase__, metadata={"""help""": """Evaluation language. Also train language if `train_language` is set to None."""} )
    # intended: train_language (Optional[str])
    SCREAMING_SNAKE_CASE__ : Optional[str] = field(
        default=UpperCAmelCase__, metadata={"""help""": """Train language if it is different from the evaluation language."""} )
    # intended: config_name (Optional[str])
    SCREAMING_SNAKE_CASE__ : Optional[str] = field(
        default=UpperCAmelCase__, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    # intended: tokenizer_name (Optional[str])
    SCREAMING_SNAKE_CASE__ : Optional[str] = field(
        default=UpperCAmelCase__, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    # intended: cache_dir (Optional[str])
    SCREAMING_SNAKE_CASE__ : Optional[str] = field(
        default=UpperCAmelCase__, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, )
    # intended: do_lower_case (Optional[bool])
    SCREAMING_SNAKE_CASE__ : Optional[bool] = field(
        default=UpperCAmelCase__, metadata={"""help""": """arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"""}, )
    # intended: use_fast_tokenizer (bool)
    SCREAMING_SNAKE_CASE__ : bool = field(
        default=UpperCAmelCase__, metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""}, )
    # intended: model_revision (str, default "main")
    SCREAMING_SNAKE_CASE__ : str = field(
        default="""main""", metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""}, )
    # intended: use_auth_token (bool)
    SCREAMING_SNAKE_CASE__ : bool = field(
        default=UpperCAmelCase__, metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        }, )
    # intended: ignore_mismatched_sizes (bool)
    SCREAMING_SNAKE_CASE__ : bool = field(
        default=UpperCAmelCase__, metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""}, )
def lowerCAmelCase ( )-> int:
    """Entry point of the XNLI text-classification example.

    Parses arguments, loads the XNLI dataset, then fine-tunes / evaluates /
    predicts with a sequence-classification model via the HF Trainer.

    NOTE(review): an obfuscation pass rewrote every local assignment target to
    ``lowerCAmelCase_`` while later references keep the original names
    (model_args, data_args, training_args, train_dataset, ...), so this
    function raises NameError as written; annotated tuple assignments such as
    ``a , b , c : str = ...`` are even SyntaxErrors. The original names can be
    recovered from the references — left byte-identical pending confirmation
    against the upstream run_xnli.py.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    lowerCAmelCase_ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_xnli''' , lowerCAmelCase_ )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    lowerCAmelCase_ : Optional[Any] = training_args.get_process_log_level()
    logger.setLevel(lowerCAmelCase_ )
    datasets.utils.logging.set_verbosity(lowerCAmelCase_ )
    transformers.utils.logging.set_verbosity(lowerCAmelCase_ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
    logger.info(f"""Training/evaluation parameters {training_args}""" )
    # Detecting last checkpoint.
    lowerCAmelCase_ : List[Any] = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        lowerCAmelCase_ : Optional[Any] = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None:
            logger.info(
                f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            lowerCAmelCase_ : int = load_dataset(
                '''xnli''' , model_args.language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        else:
            lowerCAmelCase_ : List[str] = load_dataset(
                '''xnli''' , model_args.train_language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        lowerCAmelCase_ : List[Any] = train_dataset.features['''label'''].names
    if training_args.do_eval:
        lowerCAmelCase_ : Union[str, Any] = load_dataset(
            '''xnli''' , model_args.language , split='''validation''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        lowerCAmelCase_ : Optional[int] = eval_dataset.features['''label'''].names
    if training_args.do_predict:
        lowerCAmelCase_ : List[Any] = load_dataset(
            '''xnli''' , model_args.language , split='''test''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        lowerCAmelCase_ : Union[str, Any] = predict_dataset.features['''label'''].names
    # Labels
    lowerCAmelCase_ : str = len(lowerCAmelCase_ )
    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowerCAmelCase_ : str = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCAmelCase_ , idalabel={str(lowerCAmelCase_ ): label for i, label in enumerate(lowerCAmelCase_ )} , labelaid={label: i for i, label in enumerate(lowerCAmelCase_ )} , finetuning_task='''xnli''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    lowerCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    lowerCAmelCase_ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        lowerCAmelCase_ : List[str] = '''max_length'''
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        lowerCAmelCase_ : Tuple = False
    def preprocess_function(lowerCAmelCase_ ):
        # Tokenize the texts
        return tokenizer(
            examples['''premise'''] , examples['''hypothesis'''] , padding=lowerCAmelCase_ , max_length=data_args.max_seq_length , truncation=lowerCAmelCase_ , )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            lowerCAmelCase_ : int = min(len(lowerCAmelCase_ ) , data_args.max_train_samples )
            lowerCAmelCase_ : int = train_dataset.select(range(lowerCAmelCase_ ) )
        with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
            lowerCAmelCase_ : List[Any] = train_dataset.map(
                lowerCAmelCase_ , batched=lowerCAmelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on train dataset''' , )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(lowerCAmelCase_ ) ) , 3 ):
            logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            lowerCAmelCase_ : Optional[Any] = min(len(lowerCAmelCase_ ) , data_args.max_eval_samples )
            lowerCAmelCase_ : Tuple = eval_dataset.select(range(lowerCAmelCase_ ) )
        with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
            lowerCAmelCase_ : Any = eval_dataset.map(
                lowerCAmelCase_ , batched=lowerCAmelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on validation dataset''' , )
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            lowerCAmelCase_ : Tuple = min(len(lowerCAmelCase_ ) , data_args.max_predict_samples )
            lowerCAmelCase_ : int = predict_dataset.select(range(lowerCAmelCase_ ) )
        with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ):
            lowerCAmelCase_ : List[str] = predict_dataset.map(
                lowerCAmelCase_ , batched=lowerCAmelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on prediction dataset''' , )
    # Get the metric function
    lowerCAmelCase_ : Optional[int] = evaluate.load('''xnli''' )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(lowerCAmelCase_ ):
        lowerCAmelCase_ : int = p.predictions[0] if isinstance(p.predictions , lowerCAmelCase_ ) else p.predictions
        lowerCAmelCase_ : List[str] = np.argmax(lowerCAmelCase_ , axis=1 )
        return metric.compute(predictions=lowerCAmelCase_ , references=p.label_ids )
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        lowerCAmelCase_ : Tuple = default_data_collator
    elif training_args.fpaa:
        lowerCAmelCase_ : Optional[int] = DataCollatorWithPadding(lowerCAmelCase_ , pad_to_multiple_of=8 )
    else:
        lowerCAmelCase_ : List[str] = None
    # Initialize our Trainer
    lowerCAmelCase_ : Union[str, Any] = Trainer(
        model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , )
    # Training
    if training_args.do_train:
        lowerCAmelCase_ : List[Any] = None
        if training_args.resume_from_checkpoint is not None:
            lowerCAmelCase_ : str = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            lowerCAmelCase_ : Tuple = last_checkpoint
        lowerCAmelCase_ : int = trainer.train(resume_from_checkpoint=lowerCAmelCase_ )
        lowerCAmelCase_ : List[Any] = train_result.metrics
        lowerCAmelCase_ : Tuple = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase_ )
        )
        lowerCAmelCase_ : List[Any] = min(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('''train''' , lowerCAmelCase_ )
        trainer.save_metrics('''train''' , lowerCAmelCase_ )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        lowerCAmelCase_ : str = trainer.evaluate(eval_dataset=lowerCAmelCase_ )
        lowerCAmelCase_ : Optional[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase_ )
        lowerCAmelCase_ : List[str] = min(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
        trainer.log_metrics('''eval''' , lowerCAmelCase_ )
        trainer.save_metrics('''eval''' , lowerCAmelCase_ )
    # Prediction
    if training_args.do_predict:
        logger.info('''*** Predict ***''' )
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Tuple = trainer.predict(lowerCAmelCase_ , metric_key_prefix='''predict''' )
        lowerCAmelCase_ : Any = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(lowerCAmelCase_ )
        )
        lowerCAmelCase_ : List[str] = min(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
        trainer.log_metrics('''predict''' , lowerCAmelCase_ )
        trainer.save_metrics('''predict''' , lowerCAmelCase_ )
        lowerCAmelCase_ : Union[str, Any] = np.argmax(lowerCAmelCase_ , axis=1 )
        lowerCAmelCase_ : Any = os.path.join(training_args.output_dir , '''predictions.txt''' )
        if trainer.is_world_process_zero():
            with open(lowerCAmelCase_ , '''w''' ) as writer:
                writer.write('''index\tprediction\n''' )
                for index, item in enumerate(lowerCAmelCase_ ):
                    lowerCAmelCase_ : List[Any] = label_list[item]
                    writer.write(f"""{index}\t{item}\n""" )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this chunk — the entry point above
    # is named `lowerCAmelCase`; confirm the intended callee.
    main()
from __future__ import annotations
from math import pi
def lowerCAmelCase(inductance, frequency, reactance) -> dict[str, float]:
    """Solve X_L = 2*pi*f*L for whichever of the three quantities is 0.

    Exactly one argument must be 0; it is computed from the other two and
    returned in a single-entry dict keyed by its name.

    Note: the original header declared three parameters with the same name,
    which is a SyntaxError; the names are restored from the body's references.

    Raises:
        ValueError: if zero or more than one argument is 0, or if any
            argument is negative.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        # Unreachable given the count()==1 guard; kept for defensive clarity.
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    # Run any doctest examples embedded in this module's docstrings.
    import doctest

    doctest.testmod()
import pickle
import numpy as np
from matplotlib import pyplot as plt
class snake_case__:
    """From-scratch CNN: one convolution layer, one pooling layer, and a
    two-layer fully-connected back-propagation network, with pickle-based
    save/load.

    NOTE(review): an obfuscation pass rewrote every assignment target to
    ``lowerCAmelCase_`` while later references keep the original attribute and
    local names (self.num_bpa, self.conva, data_focus, ...), so most methods
    raise NameError as written; annotated tuple assignments such as
    ``a , b : Tuple = ...`` are even SyntaxErrors. The original names can be
    recovered from the references — left byte-identical pending confirmation
    against the upstream from-scratch CNN implementation.
    """

    def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=0.2 , __lowercase=0.2 ) -> str:
        # Layer sizes, conv kernel spec, pooling size, and learning rates;
        # weights are initialized uniformly in [-0.5, 0.5).
        lowerCAmelCase_ : Union[str, Any] = bp_numa
        lowerCAmelCase_ : Any = bp_numa
        lowerCAmelCase_ : Dict = bp_numa
        lowerCAmelCase_ : Optional[int] = conva_get[:2]
        lowerCAmelCase_ : Any = conva_get[2]
        lowerCAmelCase_ : Optional[int] = size_pa
        lowerCAmelCase_ : Any = rate_w
        lowerCAmelCase_ : Union[str, Any] = rate_t
        lowerCAmelCase_ : int = [
            np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
            for i in range(self.conva[1] )
        ]
        lowerCAmelCase_ : Optional[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
        lowerCAmelCase_ : Tuple = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
        lowerCAmelCase_ : Union[str, Any] = -2 * np.random.rand(self.conva[1] ) + 1
        lowerCAmelCase_ : str = -2 * np.random.rand(self.num_bpa ) + 1
        lowerCAmelCase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1

    def lowercase_ ( self , __lowercase ) -> Dict:
        # save model dict with pickle
        lowerCAmelCase_ : str = {
            '''num_bp1''': self.num_bpa,
            '''num_bp2''': self.num_bpa,
            '''num_bp3''': self.num_bpa,
            '''conv1''': self.conva,
            '''step_conv1''': self.step_conva,
            '''size_pooling1''': self.size_poolinga,
            '''rate_weight''': self.rate_weight,
            '''rate_thre''': self.rate_thre,
            '''w_conv1''': self.w_conva,
            '''wkj''': self.wkj,
            '''vji''': self.vji,
            '''thre_conv1''': self.thre_conva,
            '''thre_bp2''': self.thre_bpa,
            '''thre_bp3''': self.thre_bpa,
        }
        with open(__lowercase , '''wb''' ) as f:
            pickle.dump(__lowercase , __lowercase )
        print(f"""Model saved: {save_path}""" )

    @classmethod
    def lowercase_ ( cls , __lowercase ) -> List[Any]:
        # read saved model
        # NOTE(review): unpickling arbitrary files executes code — only load
        # trusted model files (the original carries the same `noqa: S301`).
        with open(__lowercase , '''rb''' ) as f:
            lowerCAmelCase_ : int = pickle.load(__lowercase )  # noqa: S301
        lowerCAmelCase_ : List[Any] = model_dic.get('''conv1''' )
        conv_get.append(model_dic.get('''step_conv1''' ) )
        lowerCAmelCase_ : Optional[Any] = model_dic.get('''size_pooling1''' )
        lowerCAmelCase_ : Tuple = model_dic.get('''num_bp1''' )
        lowerCAmelCase_ : Optional[Any] = model_dic.get('''num_bp2''' )
        lowerCAmelCase_ : Dict = model_dic.get('''num_bp3''' )
        lowerCAmelCase_ : Optional[Any] = model_dic.get('''rate_weight''' )
        lowerCAmelCase_ : Optional[Any] = model_dic.get('''rate_thre''' )
        # create model instance
        lowerCAmelCase_ : Any = CNN(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
        # modify model parameter
        lowerCAmelCase_ : Any = model_dic.get('''w_conv1''' )
        lowerCAmelCase_ : Dict = model_dic.get('''wkj''' )
        lowerCAmelCase_ : str = model_dic.get('''vji''' )
        lowerCAmelCase_ : Optional[Any] = model_dic.get('''thre_conv1''' )
        lowerCAmelCase_ : Any = model_dic.get('''thre_bp2''' )
        lowerCAmelCase_ : List[str] = model_dic.get('''thre_bp3''' )
        return conv_ins

    def lowercase_ ( self , __lowercase ) -> Optional[Any]:
        # Sigmoid activation.
        return 1 / (1 + np.exp(-1 * x ))

    def lowercase_ ( self , __lowercase ) -> int:
        # Round helper used when formatting predictions.
        return round(__lowercase , 3 )

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> str:
        # convolution process
        lowerCAmelCase_ : int = convs[0]
        lowerCAmelCase_ : Optional[Any] = convs[1]
        lowerCAmelCase_ : List[Any] = np.shape(__lowercase )[0]
        # get the data slice of original image data, data_focus
        lowerCAmelCase_ : Optional[Any] = []
        for i_focus in range(0 , size_data - size_conv + 1 , __lowercase ):
            for j_focus in range(0 , size_data - size_conv + 1 , __lowercase ):
                lowerCAmelCase_ : List[Any] = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(__lowercase )
        # calculate the feature map of every single kernel, and saved as list of matrix
        lowerCAmelCase_ : Dict = []
        lowerCAmelCase_ : List[Any] = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(__lowercase ):
            lowerCAmelCase_ : List[Any] = []
            for i_focus in range(len(__lowercase ) ):
                lowerCAmelCase_ : Dict = (
                    np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(__lowercase ) )
            lowerCAmelCase_ : Union[str, Any] = np.asmatrix(__lowercase ).reshape(
                __lowercase , __lowercase )
            data_featuremap.append(__lowercase )
        # expanding the data slice to One dimenssion
        lowerCAmelCase_ : int = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(__lowercase ) )
        lowerCAmelCase_ : Tuple = np.asarray(__lowercase )
        return focus_list, data_featuremap

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase="average_pool" ) -> int:
        # pooling process
        lowerCAmelCase_ : Any = len(featuremaps[0] )
        lowerCAmelCase_ : List[Any] = int(size_map / size_pooling )
        lowerCAmelCase_ : Any = []
        for i_map in range(len(__lowercase ) ):
            lowerCAmelCase_ : str = featuremaps[i_map]
            lowerCAmelCase_ : List[str] = []
            for i_focus in range(0 , __lowercase , __lowercase ):
                for j_focus in range(0 , __lowercase , __lowercase ):
                    lowerCAmelCase_ : Tuple = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(__lowercase ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(__lowercase ) )
            lowerCAmelCase_ : Any = np.asmatrix(__lowercase ).reshape(__lowercase , __lowercase )
            featuremap_pooled.append(__lowercase )
        return featuremap_pooled

    def lowercase_ ( self , __lowercase ) -> List[str]:
        # expanding three dimension data to one dimension list
        lowerCAmelCase_ : Dict = []
        for i in range(len(__lowercase ) ):
            lowerCAmelCase_ : List[str] = np.shape(data[i] )
            lowerCAmelCase_ : Optional[int] = data[i].reshape(1 , shapes[0] * shapes[1] )
            lowerCAmelCase_ : Dict = data_listed.getA().tolist()[0]
            data_expanded.extend(__lowercase )
        lowerCAmelCase_ : Dict = np.asarray(__lowercase )
        return data_expanded

    def lowercase_ ( self , __lowercase ) -> int:
        # expanding matrix to one dimension list
        lowerCAmelCase_ : str = np.asarray(__lowercase )
        lowerCAmelCase_ : Optional[Any] = np.shape(__lowercase )
        lowerCAmelCase_ : Any = data_mat.reshape(1 , shapes[0] * shapes[1] )
        return data_expanded

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
        # Back-propagate pooled gradients to per-kernel conv-layer gradients.
        lowerCAmelCase_ : Optional[int] = []
        lowerCAmelCase_ : Any = 0
        for i_map in range(__lowercase ):
            lowerCAmelCase_ : int = np.ones((size_map, size_map) )
            for i in range(0 , __lowercase , __lowercase ):
                for j in range(0 , __lowercase , __lowercase ):
                    lowerCAmelCase_ : Optional[Any] = pd_pool[
                        i_pool
                    ]
                    lowerCAmelCase_ : Union[str, Any] = i_pool + 1
            lowerCAmelCase_ : Optional[int] = np.multiply(
                __lowercase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
            pd_all.append(__lowercase )
        return pd_all

    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=bool ) -> Optional[Any]:
        # model traning
        print('''----------------------Start Training-------------------------''' )
        print((''' - - Shape: Train_Data  ''', np.shape(__lowercase )) )
        print((''' - - Shape: Teach_Data  ''', np.shape(__lowercase )) )
        lowerCAmelCase_ : int = 0
        lowerCAmelCase_ : Optional[Any] = []
        lowerCAmelCase_ : Dict = 1_0_0_0_0
        while rp < n_repeat and mse >= error_accuracy:
            lowerCAmelCase_ : Tuple = 0
            print(f"""-------------Learning Time {rp}--------------""" )
            for p in range(len(__lowercase ) ):
                # print('------------Learning Image: %d--------------'%p)
                lowerCAmelCase_ : str = np.asmatrix(datas_train[p] )
                lowerCAmelCase_ : List[Any] = np.asarray(datas_teach[p] )
                lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.convolute(
                    __lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
                lowerCAmelCase_ : Union[str, Any] = self.pooling(__lowercase , self.size_poolinga )
                lowerCAmelCase_ : List[str] = np.shape(__lowercase )
                lowerCAmelCase_ : List[Any] = self._expand(__lowercase )
                lowerCAmelCase_ : Optional[int] = data_bp_input
                lowerCAmelCase_ : Dict = np.dot(__lowercase , self.vji.T ) - self.thre_bpa
                lowerCAmelCase_ : Union[str, Any] = self.sig(__lowercase )
                lowerCAmelCase_ : int = np.dot(__lowercase , self.wkj.T ) - self.thre_bpa
                lowerCAmelCase_ : int = self.sig(__lowercase )
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                lowerCAmelCase_ : Union[str, Any] = np.multiply(
                    (data_teach - bp_outa) , np.multiply(__lowercase , (1 - bp_outa) ) )
                lowerCAmelCase_ : Tuple = np.multiply(
                    np.dot(__lowercase , self.wkj ) , np.multiply(__lowercase , (1 - bp_outa) ) )
                lowerCAmelCase_ : Any = np.dot(__lowercase , self.vji )
                lowerCAmelCase_ : Tuple = pd_i_all / (self.size_poolinga * self.size_poolinga)
                lowerCAmelCase_ : List[Any] = pd_conva_pooled.T.getA().tolist()
                lowerCAmelCase_ : List[str] = self._calculate_gradient_from_pool(
                    __lowercase , __lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1] ):
                    lowerCAmelCase_ : int = self._expand_mat(pd_conva_all[k_conv] )
                    lowerCAmelCase_ : int = self.rate_weight * np.dot(__lowercase , __lowercase )
                    lowerCAmelCase_ : Optional[Any] = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]) )
                    lowerCAmelCase_ : List[str] = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv] ) * self.rate_thre
                    )
                # all connected layer
                lowerCAmelCase_ : Union[str, Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
                lowerCAmelCase_ : Dict = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                lowerCAmelCase_ : List[Any] = self.thre_bpa - pd_k_all * self.rate_thre
                lowerCAmelCase_ : Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                lowerCAmelCase_ : Optional[int] = np.sum(abs(data_teach - bp_outa ) )
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            lowerCAmelCase_ : int = rp + 1
            lowerCAmelCase_ : Dict = error_count / patterns
            all_mse.append(__lowercase )
        def draw_error():
            lowerCAmelCase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(__lowercase , '''+-''' )
            plt.plot(__lowercase , '''r--''' )
            plt.xlabel('''Learning Times''' )
            plt.ylabel('''All_mse''' )
            plt.grid(__lowercase , alpha=0.5 )
            plt.show()
        print('''------------------Training Complished---------------------''' )
        print((''' - - Training epoch: ''', rp, f"""     - - Mse: {mse:.6f}""") )
        if draw_e:
            draw_error()
        return mse

    def lowercase_ ( self , __lowercase ) -> Optional[int]:
        # model predict
        lowerCAmelCase_ : Optional[int] = []
        print('''-------------------Start Testing-------------------------''' )
        print((''' - - Shape: Test_Data  ''', np.shape(__lowercase )) )
        for p in range(len(__lowercase ) ):
            lowerCAmelCase_ : str = np.asmatrix(datas_test[p] )
            lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.convolute(
                __lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
            lowerCAmelCase_ : Dict = self.pooling(__lowercase , self.size_poolinga )
            lowerCAmelCase_ : Optional[int] = self._expand(__lowercase )
            lowerCAmelCase_ : Tuple = data_bp_input
            lowerCAmelCase_ : Tuple = bp_outa * self.vji.T - self.thre_bpa
            lowerCAmelCase_ : int = self.sig(__lowercase )
            lowerCAmelCase_ : List[Any] = bp_outa * self.wkj.T - self.thre_bpa
            lowerCAmelCase_ : Union[str, Any] = self.sig(__lowercase )
            produce_out.extend(bp_outa.getA().tolist() )
        lowerCAmelCase_ : Optional[Any] = [list(map(self.do_round , __lowercase ) ) for each in produce_out]
        return np.asarray(__lowercase )

    def lowercase_ ( self , __lowercase ) -> int:
        # return the data of image after convoluting process so we can check it out
        lowerCAmelCase_ : Optional[Any] = np.asmatrix(__lowercase )
        lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.convolute(
            __lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
        lowerCAmelCase_ : str = self.pooling(__lowercase , self.size_poolinga )
        return data_conveda, data_pooleda
if __name__ == "__main__":
    # Intentionally a no-op: the module only defines the CNN class.
    pass
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
# Module-level logger (library logging wrapper).
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
    """Names of the supported learning-rate schedules.

    NOTE(review): every member was renamed to the same identifier
    SCREAMING_SNAKE_CASE__, so each assignment shadows the previous one and
    only the value "piecewise_constant" survives on the class; the original
    enum (SchedulerType) had one distinct member per value. Confirm upstream.
    """
    SCREAMING_SNAKE_CASE__ : List[str] = """linear"""
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = """cosine"""
    SCREAMING_SNAKE_CASE__ : Dict = """cosine_with_restarts"""
    SCREAMING_SNAKE_CASE__ : List[str] = """polynomial"""
    SCREAMING_SNAKE_CASE__ : Dict = """constant"""
    SCREAMING_SNAKE_CASE__ : List[str] = """constant_with_warmup"""
    SCREAMING_SNAKE_CASE__ : str = """piecewise_constant"""
def lowerCAmelCase(optimizer, last_epoch=-1) -> LambdaLR:
    """Create a schedule with a constant learning rate (multiplier 1 forever).

    Note: the original header declared two parameters with the same name,
    which is a SyntaxError; restored to (optimizer, last_epoch).

    Args:
        optimizer: the wrapped ``torch.optim.Optimizer``.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        A ``LambdaLR`` whose multiplier ignores the step index.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def lowerCAmelCase(optimizer, num_warmup_steps, last_epoch=-1) -> LambdaLR:
    """Constant learning rate preceded by a linear warmup from 0.

    Note: the original header declared duplicate parameter names (a
    SyntaxError); restored to (optimizer, num_warmup_steps, last_epoch).

    Args:
        optimizer: the wrapped ``torch.optim.Optimizer``.
        num_warmup_steps: number of steps over which the multiplier ramps
            linearly from 0 to 1.
        last_epoch: index of the last epoch when resuming training.
    """
    def lr_lambda(current_step):
        # Ramp linearly during warmup; multiplier 1.0 afterwards.
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def lowerCAmelCase(optimizer, step_rules, last_epoch=-1) -> LambdaLR:
    """Piecewise-constant learning-rate schedule described by a rule string.

    ``step_rules`` looks like ``"1:0.05,10:0.1,20:0.01,0.005"``: multiplier
    0.05 until step 1, 0.1 until step 10, 0.01 until step 20, then 0.005
    for every later step (the trailing bare number).

    Note: the original block had duplicate parameter names (a SyntaxError)
    and referenced undefined locals; names restored from the references.

    Args:
        optimizer: the wrapped ``torch.optim.Optimizer``.
        step_rules: comma-separated ``step:multiplier`` pairs plus a final
            default multiplier.
        last_epoch: index of the last epoch when resuming training.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        rules_dict[int(steps_str)] = float(value_str)
    # The final comma-separated entry is the multiplier used beyond the
    # largest threshold.
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def lowerCAmelCase(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1) -> LambdaLR:
    """Linear warmup to the base LR, then linear decay to 0 at the end.

    Note: the original header declared duplicate parameter names (a
    SyntaxError); restored to
    (optimizer, num_warmup_steps, num_training_steps, last_epoch).

    Args:
        optimizer: the wrapped ``torch.optim.Optimizer``.
        num_warmup_steps: steps of linear ramp from 0 to 1.
        num_training_steps: total steps; the multiplier reaches 0 here.
        last_epoch: index of the last epoch when resuming training.
    """
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Decay linearly after warmup; clamp at 0 once training steps run out.
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
    """Return a schedule with linear warmup then cosine decay.

    After ``num_warmup_steps`` of linear warmup, the multiplier follows the
    cosine of ``num_cycles`` full waves over the remaining steps (the default
    0.5 decays from 1 to 0).

    Args:
        optimizer: the wrapped ``torch.optim.Optimizer``.
        num_warmup_steps (int): number of warmup steps.
        num_training_steps (int): total number of training steps.
        num_cycles (float): number of cosine waves in the decay phase.
        last_epoch (int): index of the last epoch when resuming; -1 starts fresh.

    Returns:
        ``torch.optim.lr_scheduler.LambdaLR``.
    """
    # Previous version had duplicate parameter names (SyntaxError) and its
    # lr_lambda referenced `progress`/`num_cycles` that were never bound.
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Fraction of the decay phase completed, in [0, 1].
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1
):
    """Return a schedule with linear warmup then cosine decay with hard restarts.

    After warmup, the multiplier follows ``num_cycles`` cosine decays, each
    jumping back to 1 at the start of a new cycle, and reaches 0 at
    ``num_training_steps``.

    Args:
        optimizer: the wrapped ``torch.optim.Optimizer``.
        num_warmup_steps (int): number of warmup steps.
        num_training_steps (int): total number of training steps.
        num_cycles (int): number of hard restarts.
        last_epoch (int): index of the last epoch when resuming; -1 starts fresh.

    Returns:
        ``torch.optim.lr_scheduler.LambdaLR``.
    """
    # Previous version had duplicate parameter names (SyntaxError) and its
    # lr_lambda referenced names that were never bound.
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # The modulo restarts the cosine at the beginning of every cycle.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Return a schedule with linear warmup then polynomial decay to ``lr_end``.

    Args:
        optimizer: the wrapped ``torch.optim.Optimizer``; its default lr is the
            starting value of the decay.
        num_warmup_steps (int): number of warmup steps.
        num_training_steps (int): total number of training steps.
        lr_end (float): final learning rate after decay (must be < initial lr).
        power (float): exponent of the polynomial decay (1.0 = linear).
        last_epoch (int): index of the last epoch when resuming; -1 starts fresh.

    Raises:
        ValueError: if ``lr_end`` is not smaller than the optimizer's initial lr.

    Returns:
        ``torch.optim.lr_scheduler.LambdaLR``.
    """
    # Previous version had duplicate parameter names (SyntaxError), unbound
    # names in lr_lambda, and a doubled word in the error message.
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Dispatch table: each SchedulerType member maps to the factory that builds it.
# BUG FIX: this dict was assigned to an unrelated throwaway name while
# `get_scheduler` below reads `TYPE_TO_SCHEDULER_FUNCTION`, which was
# otherwise undefined.
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name,
    optimizer,
    step_rules=None,
    num_warmup_steps=None,
    num_training_steps=None,
    num_cycles=1,
    power=1.0,
    last_epoch=-1,
):
    """Build a learning-rate scheduler from its string/enum name.

    Args:
        name (str or SchedulerType): which schedule to build.
        optimizer: the wrapped ``torch.optim.Optimizer``.
        step_rules (str, optional): rule string, used only by PIECEWISE_CONSTANT.
        num_warmup_steps (int, optional): required by every warmup schedule.
        num_training_steps (int, optional): required by every decaying schedule.
        num_cycles (int): number of restarts, used only by COSINE_WITH_RESTARTS.
        power (float): decay exponent, used only by POLYNOMIAL.
        last_epoch (int): index of the last epoch when resuming; -1 starts fresh.

    Raises:
        ValueError: when a schedule's required argument is missing.

    Returns:
        The configured ``LambdaLR`` scheduler.
    """
    # Previous version declared eight parameters all with the same name
    # (SyntaxError) while the body read the canonical argument names.
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : int =logging.get_logger(__name__)
_UpperCAmelCase : Tuple ={
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = """sew-d"""
def __init__( self , __lowercase=3_2 , __lowercase=7_6_8 , __lowercase=1_2 , __lowercase=1_2 , __lowercase=3_0_7_2 , __lowercase=2 , __lowercase=5_1_2 , __lowercase=2_5_6 , __lowercase=True , __lowercase=True , __lowercase=("p2c", "c2p") , __lowercase="layer_norm" , __lowercase="gelu_python" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.1 , __lowercase=0.02 , __lowercase=1e-7 , __lowercase=1e-5 , __lowercase="group" , __lowercase="gelu" , __lowercase=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __lowercase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __lowercase=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __lowercase=False , __lowercase=1_2_8 , __lowercase=1_6 , __lowercase=True , __lowercase=0.05 , __lowercase=1_0 , __lowercase=2 , __lowercase=0.0 , __lowercase=1_0 , __lowercase=0 , __lowercase="mean" , __lowercase=False , __lowercase=False , __lowercase=2_5_6 , __lowercase=0 , __lowercase=1 , __lowercase=2 , **__lowercase , ) -> int:
super().__init__(**__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase )
lowerCAmelCase_ : str = hidden_size
lowerCAmelCase_ : Optional[Any] = feat_extract_norm
lowerCAmelCase_ : Dict = feat_extract_activation
lowerCAmelCase_ : str = list(__lowercase )
lowerCAmelCase_ : List[str] = list(__lowercase )
lowerCAmelCase_ : str = list(__lowercase )
lowerCAmelCase_ : Dict = conv_bias
lowerCAmelCase_ : List[Any] = num_conv_pos_embeddings
lowerCAmelCase_ : List[Any] = num_conv_pos_embedding_groups
lowerCAmelCase_ : Optional[Any] = len(self.conv_dim )
lowerCAmelCase_ : List[Any] = num_hidden_layers
lowerCAmelCase_ : str = intermediate_size
lowerCAmelCase_ : Tuple = squeeze_factor
lowerCAmelCase_ : str = max_position_embeddings
lowerCAmelCase_ : Optional[int] = position_buckets
lowerCAmelCase_ : Any = share_att_key
lowerCAmelCase_ : Optional[Any] = relative_attention
lowerCAmelCase_ : List[str] = norm_rel_ebd
lowerCAmelCase_ : Tuple = list(__lowercase )
lowerCAmelCase_ : str = hidden_act
lowerCAmelCase_ : Any = num_attention_heads
lowerCAmelCase_ : Dict = hidden_dropout
lowerCAmelCase_ : Dict = attention_dropout
lowerCAmelCase_ : List[str] = activation_dropout
lowerCAmelCase_ : Any = feat_proj_dropout
lowerCAmelCase_ : Dict = final_dropout
lowerCAmelCase_ : Any = layer_norm_eps
lowerCAmelCase_ : Optional[int] = feature_layer_norm_eps
lowerCAmelCase_ : Optional[int] = initializer_range
lowerCAmelCase_ : Optional[Any] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase_ : int = apply_spec_augment
lowerCAmelCase_ : List[Any] = mask_time_prob
lowerCAmelCase_ : int = mask_time_length
lowerCAmelCase_ : Any = mask_time_min_masks
lowerCAmelCase_ : Tuple = mask_feature_prob
lowerCAmelCase_ : Union[str, Any] = mask_feature_length
lowerCAmelCase_ : int = mask_feature_min_masks
# ctc loss
lowerCAmelCase_ : int = ctc_loss_reduction
lowerCAmelCase_ : Optional[Any] = ctc_zero_infinity
# sequence classification
lowerCAmelCase_ : Optional[int] = use_weighted_layer_sum
lowerCAmelCase_ : Optional[Any] = classifier_proj_size
@property
def lowercase_ ( self ) -> List[Any]:
return functools.reduce(operator.mul , self.conv_stride , 1 ) | 619 |
from __future__ import annotations
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )-> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase__ )
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = field(default="""automatic-speech-recognition""", metadata={"""include_in_asdict_even_if_is_default""": True} )
SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""audio""": Audio()} )
SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""transcription""": Value("""string""" )} )
SCREAMING_SNAKE_CASE__ : str = "audio"
SCREAMING_SNAKE_CASE__ : str = "transcription"
def lowercase_ ( self , __lowercase ) -> int:
if self.audio_column not in features:
raise ValueError(f"""Column {self.audio_column} is not present in features.""" )
if not isinstance(features[self.audio_column] , __lowercase ):
raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" )
lowerCAmelCase_ : List[str] = copy.deepcopy(self )
lowerCAmelCase_ : Optional[Any] = self.input_schema.copy()
lowerCAmelCase_ : Optional[Any] = features[self.audio_column]
lowerCAmelCase_ : List[str] = input_schema
return task_template
@property
def lowercase_ ( self ) -> Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"} | 619 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_UpperCAmelCase : Any ="""src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_UpperCAmelCase : Optional[Any] =direct_transformers_import(PATH_TO_TRANSFORMERS)
_UpperCAmelCase : List[str] =transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_UpperCAmelCase : Dict =re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
_UpperCAmelCase : Any ={
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
lowerCAmelCase_ : Any = None
# source code of `config_class`
lowerCAmelCase_ : Optional[int] = inspect.getsource(lowerCAmelCase_ )
lowerCAmelCase_ : str = _re_checkpoint.findall(lowerCAmelCase_ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
lowerCAmelCase_ : Optional[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
lowerCAmelCase_ : Tuple = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
lowerCAmelCase_ : List[str] = ckpt_name
break
return checkpoint
def lowerCAmelCase ( )-> Optional[Any]:
lowerCAmelCase_ : Tuple = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
lowerCAmelCase_ : int = get_checkpoint_from_config_class(lowerCAmelCase_ )
lowerCAmelCase_ : Tuple = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
lowerCAmelCase_ : List[Any] = '''\n'''.join(sorted(lowerCAmelCase_ ) )
raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints() | 619 | 1 |
from __future__ import annotations
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> float:
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )-> float:
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )-> float:
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return compound_interest(
lowerCAmelCase_ , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class snake_case__:
'''simple docstring'''
def __init__( self , __lowercase , __lowercase , __lowercase = True , __lowercase = False ) -> Tuple:
lowerCAmelCase_ : Optional[int] = scheduler
lowerCAmelCase_ : Dict = optimizers if isinstance(__lowercase , (list, tuple) ) else [optimizers]
lowerCAmelCase_ : str = split_batches
lowerCAmelCase_ : Any = step_with_optimizer
lowerCAmelCase_ : Optional[Any] = GradientState()
def lowercase_ ( self , *__lowercase , **__lowercase ) -> Any:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*__lowercase , **__lowercase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*__lowercase , **__lowercase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
lowerCAmelCase_ : Optional[Any] = AcceleratorState().num_processes
for _ in range(__lowercase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*__lowercase , **__lowercase )
else:
self.scheduler.step(*__lowercase , **__lowercase )
def lowercase_ ( self ) -> Union[str, Any]:
return self.scheduler.get_last_lr()
def lowercase_ ( self ) -> List[str]:
return self.scheduler.state_dict()
def lowercase_ ( self , __lowercase ) -> int:
self.scheduler.load_state_dict(__lowercase )
def lowercase_ ( self ) -> Tuple:
return self.scheduler.get_lr()
def lowercase_ ( self , *__lowercase , **__lowercase ) -> int:
return self.scheduler.print_lr(*__lowercase , **__lowercase ) | 619 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_UpperCAmelCase : Tuple ={"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
_UpperCAmelCase : List[Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 619 |
from manim import *
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
def lowercase_ ( self ) -> Tuple:
lowerCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase_ : Tuple = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : int = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Tuple = Text('''CPU''' , font_size=2_4 )
lowerCAmelCase_ : Union[str, Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowercase )
lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4 )]
lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : List[Any] = Text('''GPU''' , font_size=2_4 )
lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
gpu.move_to([-1, -1, 0] )
self.add(__lowercase )
lowerCAmelCase_ : str = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Dict = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Dict = Text('''Model''' , font_size=2_4 )
lowerCAmelCase_ : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
model.move_to([3, -1.0, 0] )
self.add(__lowercase )
lowerCAmelCase_ : int = []
lowerCAmelCase_ : int = []
lowerCAmelCase_ : Dict = []
for i, rect in enumerate(__lowercase ):
rect.set_stroke(__lowercase )
lowerCAmelCase_ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__lowercase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowercase , buff=0.0 )
self.add(__lowercase )
model_cpu_arr.append(__lowercase )
self.add(*__lowercase , *__lowercase , *__lowercase )
lowerCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Union[str, Any] = Text('''Loaded Checkpoint''' , font_size=2_4 )
lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__lowercase )
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Dict = []
for i, rect in enumerate(__lowercase ):
lowerCAmelCase_ : str = fill.copy().set_fill(__lowercase , opacity=0.7 )
target.move_to(__lowercase )
ckpt_arr.append(__lowercase )
lowerCAmelCase_ : Union[str, Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__lowercase )
self.add(*__lowercase , *__lowercase )
lowerCAmelCase_ : Union[str, Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase_ : str = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowercase , __lowercase )
lowerCAmelCase_ : str = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowercase )
lowerCAmelCase_ : str = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
lowerCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase_ : Any = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
lowerCAmelCase_ : List[str] = Text('''Disk''' , font_size=2_4 )
lowerCAmelCase_ : Optional[int] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__lowercase , run_time=3 ) , Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) )
lowerCAmelCase_ : int = []
for i, rect in enumerate(__lowercase ):
lowerCAmelCase_ : int = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
self.play(*__lowercase )
self.play(FadeOut(__lowercase ) )
lowerCAmelCase_ : Union[str, Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowercase , run_time=3 ) )
self.play(
FadeOut(__lowercase , __lowercase , *__lowercase , *__lowercase ) , )
self.wait() | 619 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
_UpperCAmelCase : str =logging.get_logger(__name__)
_UpperCAmelCase : int ={"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Dict ={
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase : List[str] ={
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : str = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE__ : Tuple = RobertaTokenizer
def __init__( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase="replace" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase=False , __lowercase=True , **__lowercase , ) -> Optional[Any]:
super().__init__(
__lowercase , __lowercase , tokenizer_file=__lowercase , errors=__lowercase , bos_token=__lowercase , eos_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase , **__lowercase , )
lowerCAmelCase_ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __lowercase ) != add_prefix_space:
lowerCAmelCase_ : Optional[Any] = getattr(__lowercase , pre_tok_state.pop('''type''' ) )
lowerCAmelCase_ : List[str] = add_prefix_space
lowerCAmelCase_ : List[Any] = pre_tok_class(**__lowercase )
lowerCAmelCase_ : int = add_prefix_space
lowerCAmelCase_ : Tuple = '''post_processor'''
lowerCAmelCase_ : Optional[int] = getattr(self.backend_tokenizer , __lowercase , __lowercase )
if tokenizer_component_instance:
lowerCAmelCase_ : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase_ : Tuple = tuple(state['''sep'''] )
if "cls" in state:
lowerCAmelCase_ : Any = tuple(state['''cls'''] )
lowerCAmelCase_ : Dict = False
if state.get('''add_prefix_space''' , __lowercase ) != add_prefix_space:
lowerCAmelCase_ : int = add_prefix_space
lowerCAmelCase_ : List[Any] = True
if state.get('''trim_offsets''' , __lowercase ) != trim_offsets:
lowerCAmelCase_ : Optional[int] = trim_offsets
lowerCAmelCase_ : Optional[Any] = True
if changes_to_apply:
lowerCAmelCase_ : str = getattr(__lowercase , state.pop('''type''' ) )
lowerCAmelCase_ : Union[str, Any] = component_class(**__lowercase )
setattr(self.backend_tokenizer , __lowercase , __lowercase )
@property
def lowercase_ ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase_ ( self , __lowercase ) -> int:
lowerCAmelCase_ : Optional[Any] = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else value
lowerCAmelCase_ : Optional[Any] = value
def lowercase_ ( self , *__lowercase , **__lowercase ) -> BatchEncoding:
lowerCAmelCase_ : List[str] = kwargs.get('''is_split_into_words''' , __lowercase )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__lowercase , **__lowercase )
def lowercase_ ( self , *__lowercase , **__lowercase ) -> BatchEncoding:
lowerCAmelCase_ : Any = kwargs.get('''is_split_into_words''' , __lowercase )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__lowercase , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
lowerCAmelCase_ : str = self._tokenizer.model.save(__lowercase , name=__lowercase )
return tuple(__lowercase )
def lowercase_ ( self , __lowercase , __lowercase=None ) -> List[str]:
lowerCAmelCase_ : List[str] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
lowerCAmelCase_ : List[Any] = [self.sep_token_id]
lowerCAmelCase_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 619 |
_UpperCAmelCase : Dict =[
(1000, """M"""),
(900, """CM"""),
(500, """D"""),
(400, """CD"""),
(100, """C"""),
(90, """XC"""),
(50, """L"""),
(40, """XL"""),
(10, """X"""),
(9, """IX"""),
(5, """V"""),
(4, """IV"""),
(1, """I"""),
]
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
lowerCAmelCase_ : Any = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1_000}
lowerCAmelCase_ : Optional[int] = 0
lowerCAmelCase_ : List[str] = 0
while place < len(lowerCAmelCase_ ):
if (place + 1 < len(lowerCAmelCase_ )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
lowerCAmelCase_ : List[Any] = []
for arabic, roman in ROMAN:
((lowerCAmelCase_) , (lowerCAmelCase_)) : Optional[int] = divmod(lowerCAmelCase_ , lowerCAmelCase_ )
result.append(roman * factor )
if number == 0:
break
return "".join(lowerCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 | 1 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    """Download the BLIP demo image and preprocess it for the model.

    Args:
        image_size: side length to resize the image to (square).
        device: torch device the resulting tensor is moved to.

    Returns:
        A (1, 3, image_size, image_size) float tensor normalized with the
        CLIP mean/std, on ``device``.
    """
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    """Map an original BLIP state-dict key to the HF Transformers naming scheme.

    Substitutions are applied sequentially, each one operating on the result
    of the previous rule; keys that match no rule are returned unchanged.
    """
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """Convert original BLIP checkpoints (captioning, VQA, ITM) to HF format.

    Downloads the three official BLIP checkpoints, re-keys their state dicts
    via ``rename_key``, loads them into the corresponding HF models, sanity
    checks the outputs against known values, and optionally saves each model.

    Args:
        pytorch_dump_folder_path: output directory; the VQA/ITM variants are
            saved with ``_vqa`` / ``_itm`` suffixes. ``None`` skips saving.
        config_path: optional path to an HF ``config.json``; otherwise a
            default ``BlipConfig`` is built.
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})
    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    # Re-key the original state dict into HF naming (pop old key, insert new).
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids
    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    # convert_blip_checkpoint takes (pytorch_dump_folder_path, config_path);
    # the previous call also passed an undeclared `args.checkpoint_path`.
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
import csv
import tweepy
# Twitter API credentials (fill these in before running).
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    """Download a user's most recent tweets and save them to a CSV file.

    Pages backwards through the timeline 200 tweets at a time (the API
    maximum) until no tweets remain, then writes ``new_<name>_tweets.csv``
    with columns id / created_at / text.
    """
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one to avoid re-fetching it
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
    # pass in the username of the account you want to download
    # NOTE(review): requires valid API credentials in the constants above and
    # a downloader exposed under the name `get_all_tweets` — confirm both.
    get_all_tweets("""FirePing32""")
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
# Template inserted around chat exchanges; <<task>> is replaced at runtime.
CHAT_MESSAGE_PROMPT = """
Human: <<task>>
Assistant: """


# Dataset repo holding the default prompt templates, and the file used per mode.
DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def lowerCAmelCase(prompt_or_repo_id, agent_name, mode="run") -> str:
    """Return a prompt template.

    If the argument contains whitespace it is treated as a literal prompt and
    returned as-is; otherwise it is treated as a dataset repo id (defaulting
    to ``DEFAULT_PROMPTS_REPO``) and the template file for ``mode`` is
    downloaded and read.
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
from math import sqrt
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime (trial division up to sqrt)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: return all primes from 2 up to n (inclusive)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.

    # actual sieve of erathostenes: zero out every multiple of a survivor
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def get_prime_numbers(n: int) -> list:
    """Return all primes from 2 up to n (inclusive), via trial division."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number: int) -> list:
    """Return the list of prime factors of ``number`` (with multiplicity)."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returned of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def is_even(number: int) -> bool:
    """Return True if ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0
def is_odd(number: int) -> bool:
    """Return True if ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0
def goldbach(number: int) -> list:
    """Return two primes whose sum is ``number`` (Goldbach's conjecture).

    ``number`` must be an even integer greater than 2.
    """
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd(number1: int, number2: int) -> int:
    """Return the greatest common divisor of two non-negative integers
    (Euclidean algorithm)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
def kg_v(number1: int, number2: int) -> int:
    """Return the least common multiple ("kgV") of two positive integers.

    Computed from the prime factorizations: each prime contributes the
    maximum power it has in either number.
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def get_prime(n: int) -> int:
    """Return the n-th prime number (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return the primes strictly between two primes ``p_number_1 < p_number_2``."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int) -> list:
    """Return all divisors of ``n`` in ascending order (including 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number(number: int) -> bool:
    """Return True if ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduce ``numerator/denominator`` by their greatest common divisor."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    """Return n! for n >= 0 (factorial(0) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans
def fib(n: int) -> int:
    """Return the n-th Fibonacci number with fib(0) == fib(1) == 1."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Resolve an Instagram video/IGTV page URL to its raw video bytes.

    Uses the downloadgram API to obtain the direct video source URL, then
    downloads and returns its content.
    """
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    # Prompt for the video URL, download it, and save it with a timestamped name.
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
# Read by the ternary searches below: ranges smaller than this fall back to
# linear search.
precision = 10
def lin_search(left: int, right: int, array: list, target: int) -> int:
    """Linear search for ``target`` in ``array[left:right]``.

    Returns the index of the first match, or -1 if not found.
    """
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list, target: int) -> int:
    """Iterative ternary search over a sorted ``array``.

    Returns an index of ``target`` or -1. Ranges narrower than the module
    constant ``precision`` are handed off to ``lin_search``.
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list, target: int) -> int:
    """Recursive ternary search for ``target`` in sorted ``array[left..right]``.

    Returns an index of ``target`` or -1. Small ranges (narrower than the
    module constant ``precision``) fall back to ``lin_search``.
    """
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Read a sorted, comma-separated list and a target, then run both searches.
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
import cva
import numpy as np
class HarrisCorner:
    """Harris corner detector over a grayscale image."""

    def __init__(self, k, window_size):
        """Store detector parameters.

        Args:
            k: Harris free parameter; only the conventional 0.04 / 0.06 accepted.
            window_size: side length of the sliding window.
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> "tuple[cva.Mat, list[list[int]]]":
        """Detect corners in the image at ``img_path``.

        Returns:
            (color image with detected corners marked red, list of [x, y, r]
            corner entries where r is the Harris response).
        """
        img = cva.imread(img_path, 0)
        h, w = img.shape
        corner_list: "list[list[int]]" = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    # Run the detector on an image and save the annotated result.
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cva.imwrite("detect.png", color_img)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> public names it provides.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy built from the structure above.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import socket
def main() -> None:
    """Connect to a local file server and save the received stream.

    Connects to port 12312 on this host, sends a greeting, then reads the
    socket in 1 KiB chunks into ``Received_file`` until the peer closes.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12_312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1_024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
# Script entry point: fetch the file from the local server.
if __name__ == "__main__":
    main()
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_UpperCAmelCase : Any =False
class snake_case__( unittest.TestCase ):
    '''
    Fast CPU smoke tests for a VQ-Diffusion-style pipeline built from tiny
    dummy components.

    NOTE(review): this copy is machine-mangled — values are assigned to the
    name `lowerCAmelCase_` but read back under their original identifiers
    (`model`, `tokenizer`, `pipe`, `height`, ...), several `__lowercase`
    references appear outside any scope that defines them, and the properties
    read below (`self.num_embed`, `self.dummy_vqvae`, ...) are never bound
    under those names.  The class will not run as-is; confirm every detail
    against the upstream source.
    '''
    def lowercase_ ( self ) -> Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def lowercase_ ( self ) -> Union[str, Any]:
        # number of discrete codebook entries used by the dummy VQ model
        return 1_2
    @property
    def lowercase_ ( self ) -> Any:
        # number of AdaNorm timestep embeddings for the dummy transformer
        return 1_2
    @property
    def lowercase_ ( self ) -> Optional[Any]:
        # hidden size of the dummy text encoder
        return 3_2
    @property
    def lowercase_ ( self ) -> int:
        # Tiny VQ autoencoder; seeding keeps the random init reproducible.
        torch.manual_seed(0 )
        lowerCAmelCase_ : Any = VQModel(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model
    @property
    def lowercase_ ( self ) -> Dict:
        # Tiny CLIP tokenizer pulled from the hub, used only for tokenization shapes.
        lowerCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer
    @property
    def lowercase_ ( self ) -> int:
        # Tiny CLIP text encoder with a deterministic init.
        torch.manual_seed(0 )
        lowerCAmelCase_ : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        return CLIPTextModel(__lowercase )
    @property
    def lowercase_ ( self ) -> List[str]:
        # Dummy discrete-token transformer sized for a 12x12 latent grid.
        torch.manual_seed(0 )
        lowerCAmelCase_ : Union[str, Any] = 1_2
        lowerCAmelCase_ : int = 1_2
        lowerCAmelCase_ : Union[str, Any] = {
            '''attention_bias''': True,
            '''cross_attention_dim''': 3_2,
            '''attention_head_dim''': height * width,
            '''num_attention_heads''': 1,
            '''num_vector_embeds''': self.num_embed,
            '''num_embeds_ada_norm''': self.num_embeds_ada_norm,
            '''norm_num_groups''': 3_2,
            '''sample_size''': width,
            '''activation_fn''': '''geglu-approximate''',
        }
        lowerCAmelCase_ : List[str] = TransformeraDModel(**__lowercase )
        return model
    def lowercase_ ( self ) -> str:
        # End-to-end generation WITHOUT learned classifier-free sampling embeddings;
        # compares the dict output against the tuple (return_dict=False) output.
        lowerCAmelCase_ : List[Any] = '''cpu'''
        lowerCAmelCase_ : Any = self.dummy_vqvae
        lowerCAmelCase_ : str = self.dummy_text_encoder
        lowerCAmelCase_ : Union[str, Any] = self.dummy_tokenizer
        lowerCAmelCase_ : int = self.dummy_transformer
        lowerCAmelCase_ : List[str] = VQDiffusionScheduler(self.num_embed )
        lowerCAmelCase_ : Union[str, Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=__lowercase )
        lowerCAmelCase_ : Dict = VQDiffusionPipeline(
            vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
        lowerCAmelCase_ : int = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowerCAmelCase_ : Any = '''teddy bear playing in the pool'''
        # Same seed for both invocations so the two outputs are comparable.
        lowerCAmelCase_ : int = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Tuple = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' )
        lowerCAmelCase_ : Union[str, Any] = output.images
        lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : List[Any] = pipe(
            [prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0]
        lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
        lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 2_4, 2_4, 3)
        # Reference slice of the bottom-right 3x3 corner of the last channel.
        lowerCAmelCase_ : Optional[int] = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def lowercase_ ( self ) -> List[str]:
        # Same end-to-end check, but WITH learned classifier-free sampling embeddings.
        lowerCAmelCase_ : Optional[Any] = '''cpu'''
        lowerCAmelCase_ : str = self.dummy_vqvae
        lowerCAmelCase_ : Dict = self.dummy_text_encoder
        lowerCAmelCase_ : List[Any] = self.dummy_tokenizer
        lowerCAmelCase_ : Union[str, Any] = self.dummy_transformer
        lowerCAmelCase_ : Tuple = VQDiffusionScheduler(self.num_embed )
        lowerCAmelCase_ : str = LearnedClassifierFreeSamplingEmbeddings(
            learnable=__lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
        lowerCAmelCase_ : List[str] = VQDiffusionPipeline(
            vqvae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , transformer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
        lowerCAmelCase_ : Union[str, Any] = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowerCAmelCase_ : Any = '''teddy bear playing in the pool'''
        lowerCAmelCase_ : List[str] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Dict = pipe([prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' )
        lowerCAmelCase_ : str = output.images
        lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Union[str, Any] = pipe(
            [prompt] , generator=__lowercase , output_type='''np''' , return_dict=__lowercase , num_inference_steps=2 )[0]
        lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
        lowerCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 2_4, 2_4, 3)
        lowerCAmelCase_ : Union[str, Any] = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] )
        # NOTE(review): tolerance 2.0 here is far looser than the 1e-2 used on the
        # tuple-output check on the next line — possibly intentional, confirm upstream.
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class snake_case__( unittest.TestCase ):
    '''
    Slow GPU integration test: runs the pretrained microsoft/vq-diffusion-ithq
    pipeline and compares the result against a reference image from the hub.

    NOTE(review): variable names are machine-mangled (assignments go to
    `lowerCAmelCase_`, reads use `pipeline`/`output`/`image`/`expected_image`),
    so this will not run as-is — confirm against the upstream source.
    '''
    def lowercase_ ( self ) -> Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def lowercase_ ( self ) -> int:
        # Reference output generated with classifier-free sampling.
        lowerCAmelCase_ : Tuple = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
        lowerCAmelCase_ : str = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
        lowerCAmelCase_ : List[Any] = pipeline.to(__lowercase )
        pipeline.set_progress_bar_config(disable=__lowercase )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        lowerCAmelCase_ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(0 )
        lowerCAmelCase_ : Optional[int] = pipeline(
            '''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=__lowercase , output_type='''np''' , )
        lowerCAmelCase_ : Union[str, Any] = output.images[0]
        assert image.shape == (2_5_6, 2_5_6, 3)
        # Loose pixel-value tolerance: CPU vs GPU generators diverge slightly.
        assert np.abs(expected_image - image ).max() < 2.0
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class snake_case__( unittest.TestCase ):
    '''
    Nightly ONNX Runtime integration test for the legacy Stable Diffusion
    inpainting pipeline, executed on the CUDA execution provider.

    NOTE(review): machine-mangled names (`options`, `init_image`, `mask_image`,
    `pipe`, `output`, `expected_image` are read but only `lowerCAmelCase_` is
    ever assigned) — confirm against the upstream source.
    '''
    @property
    def lowercase_ ( self ) -> Union[str, Any]:
        # ORT execution-provider tuple: CUDA, capped at a 15 GB memory arena.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def lowercase_ ( self ) -> List[Any]:
        # Session options with memory-pattern optimization disabled.
        lowerCAmelCase_ : List[str] = ort.SessionOptions()
        lowerCAmelCase_ : int = False
        return options
    def lowercase_ ( self ) -> Dict:
        # Fixture images: the inpainting source, its mask, and a reference output.
        lowerCAmelCase_ : Optional[Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        lowerCAmelCase_ : int = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        lowerCAmelCase_ : Optional[int] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )
        # using the PNDM scheduler by default
        lowerCAmelCase_ : Dict = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=__lowercase , feature_extractor=__lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowerCAmelCase_ : Optional[int] = '''A red cat sitting on a park bench'''
        # NumPy RandomState generator keeps the ONNX sampling deterministic.
        lowerCAmelCase_ : Any = np.random.RandomState(0 )
        lowerCAmelCase_ : Optional[Any] = pipe(
            prompt=__lowercase , image=__lowercase , mask_image=__lowercase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_5 , generator=__lowercase , output_type='''np''' , )
        lowerCAmelCase_ : Dict = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert np.abs(expected_image - image ).max() < 1e-2
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
_UpperCAmelCase : Dict =None
# Module-level constants for the XLNet fast tokenizer.
# NOTE(review): every assignment target below has been mangled to
# `_UpperCAmelCase`, but the class body later reads the canonical names
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, ...), so the lookups will
# fail at import time — confirm against the upstream source.
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
# Expected on-disk filenames for the slow (sentencepiece) and fast (Rust) vocabularies.
_UpperCAmelCase : Any ={"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
# Hub URLs for the pretrained vocabulary/tokenizer files per checkpoint.
_UpperCAmelCase : Any ={
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    },
    """tokenizer_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
    },
}
# XLNet uses relative attention, so there is no fixed positional-embedding size.
_UpperCAmelCase : Dict ={
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}
# SentencePiece word-boundary marker.
_UpperCAmelCase : Tuple ="""▁"""
# Segments (not really needed)
_UpperCAmelCase : str =0
_UpperCAmelCase : List[str] =1
_UpperCAmelCase : int =2
_UpperCAmelCase : Any =3
_UpperCAmelCase : List[Any] =4
class snake_case__( UpperCAmelCase__ ):
    '''
    Fast (Rust-backed) XLNet tokenizer.

    XLNet appends its special tokens at the END of the sequence
    (``tokens <sep> <cls>``) and pads on the left, unlike BERT-style models.

    NOTE(review): the ``__init__`` signature repeats the parameter name
    ``__lowercase`` (a SyntaxError) and instance attributes are assigned to
    bare ``lowerCAmelCase_`` locals instead of ``self.<attr>`` — this mangled
    copy cannot run; confirm against the upstream source.
    '''
    SCREAMING_SNAKE_CASE__ : int = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # XLNet pads on the left.
    SCREAMING_SNAKE_CASE__ : Any = """left"""
    SCREAMING_SNAKE_CASE__ : List[Any] = XLNetTokenizer
    def __init__( self , __lowercase=None , __lowercase=None , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<unk>" , __lowercase="<sep>" , __lowercase="<pad>" , __lowercase="<cls>" , __lowercase="<mask>" , __lowercase=["<eop>", "<eod>"] , **__lowercase , ) -> List[Any]:
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase_ : Any = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
        super().__init__(
            vocab_file=__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , **__lowercase , )
        # Padding token type id used by XLNet (segment id 3).
        lowerCAmelCase_ : List[Any] = 3
        lowerCAmelCase_ : Dict = do_lower_case
        lowerCAmelCase_ : Dict = remove_space
        lowerCAmelCase_ : List[str] = keep_accents
        lowerCAmelCase_ : List[str] = vocab_file
        # The slow vocabulary can only be re-saved when the spiece file is present.
        lowerCAmelCase_ : str = False if not self.vocab_file else True
    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        # Single sequence:  X <sep> <cls>;  pair:  A <sep> B <sep> <cls>.
        lowerCAmelCase_ : Tuple = [self.sep_token_id]
        lowerCAmelCase_ : Any = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls
    def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
        # Token-type ids: segment 0 for the first sequence, 1 for the second,
        # and the special segment id 2 for the trailing <cls>.
        lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCAmelCase_ : List[Any] = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
    def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
        # Copy the sentencepiece model next to the fast tokenizer files.
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(__lowercase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase_ : str = os.path.join(
            __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
            copyfile(self.vocab_file , __lowercase )
        return (out_vocab_file,)
import copy
import random
from transformers import CLIPTokenizer
class snake_case__( UpperCAmelCase__ ):
    '''
    CLIP tokenizer extension for textual inversion: a single placeholder token
    can map to several vector tokens (``token_0 ... token_{n-1}``), which are
    substituted into the text before normal tokenization.

    NOTE(review): machine-mangled — assignments go to ``lowerCAmelCase_`` but
    later reads use the original names (``num_added_tokens``, ``output``,
    ``tokens``, ``text``), so this copy cannot run; confirm upstream.
    '''
    def __init__( self , *__lowercase , **__lowercase ) -> Dict:
        super().__init__(*__lowercase , **__lowercase )
        # Maps each placeholder token to its list of per-vector sub-tokens.
        lowerCAmelCase_ : Union[str, Any] = {}
    def lowercase_ ( self , __lowercase , *__lowercase , **__lowercase ) -> int:
        # Add one token and fail loudly if it already exists in the vocabulary.
        lowerCAmelCase_ : Optional[Any] = super().add_tokens(__lowercase , *__lowercase , **__lowercase )
        if num_added_tokens == 0:
            raise ValueError(
                f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
                ''' `placeholder_token` that is not already in the tokenizer.''' )
    def lowercase_ ( self , __lowercase , *__lowercase , __lowercase=1 , **__lowercase ) -> Any:
        # Register a placeholder as either one token or `num_vec_per_token`
        # suffixed tokens ("_0", "_1", ...), recording the mapping in token_map.
        lowerCAmelCase_ : Union[str, Any] = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(__lowercase , *__lowercase , **__lowercase )
            output.append(__lowercase )
        else:
            lowerCAmelCase_ : Any = []
            for i in range(__lowercase ):
                lowerCAmelCase_ : Union[str, Any] = placeholder_token + f"""_{i}"""
                self.try_adding_tokens(__lowercase , *__lowercase , **__lowercase )
                output.append(__lowercase )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"""The tokenizer already has placeholder token {token} that can get confused with"""
                    f""" {placeholder_token}keep placeholder tokens independent""" )
        lowerCAmelCase_ : Dict = output
    def lowercase_ ( self , __lowercase , __lowercase=False , __lowercase=1.0 ) -> str:
        # Expand every registered placeholder in `text` into its vector tokens;
        # optionally shuffle them and/or load only a leading fraction
        # (prop_tokens_to_load) of the vectors.  Recurses over lists of strings.
        if isinstance(__lowercase , __lowercase ):
            lowerCAmelCase_ : str = []
            for i in range(len(__lowercase ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=__lowercase ) )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                lowerCAmelCase_ : Optional[int] = self.token_map[placeholder_token]
                lowerCAmelCase_ : int = tokens[: 1 + int(len(__lowercase ) * prop_tokens_to_load )]
                if vector_shuffle:
                    # shuffle a copy so the stored mapping keeps its order
                    lowerCAmelCase_ : str = copy.copy(__lowercase )
                    random.shuffle(__lowercase )
                lowerCAmelCase_ : Any = text.replace(__lowercase , ''' '''.join(__lowercase ) )
        return text
    def __call__( self , __lowercase , *__lowercase , __lowercase=False , __lowercase=1.0 , **__lowercase ) -> Any:
        # Tokenize after placeholder expansion.
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                __lowercase , vector_shuffle=__lowercase , prop_tokens_to_load=__lowercase ) , *__lowercase , **__lowercase , )
    def lowercase_ ( self , *__lowercase , __lowercase=False , __lowercase=1.0 , **__lowercase ) -> Dict:
        # Encode after placeholder expansion.
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                __lowercase , vector_shuffle=__lowercase , prop_tokens_to_load=__lowercase ) , *__lowercase , **__lowercase , )
import math
import qiskit
def lowerCAmelCase ( lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 )-> qiskit.result.counts.Counts:
    """
    Build and simulate a quantum full adder over two input bits and a carry-in,
    returning the measurement counts (sum bit and carry-out) over 1000 shots.
    An input value of 2 puts the corresponding qubit into superposition (H gate).

    NOTE(review): this copy is machine-mangled — the signature repeats the
    parameter name (a SyntaxError), ``isinstance(x, x)`` below is nonsensical,
    and ``entry``/``quantum_circuit``/``backend``/``job`` are read without ever
    being bound.  Confirm against the upstream source.
    """
    if (
        isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
        or isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
        or isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
    ):
        raise TypeError('''inputs must be integers.''' )
    # NOTE(review): the check below rejects negatives, i.e. it enforces
    # non-negative rather than strictly positive inputs despite the message.
    if (input_a < 0) or (input_a < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''' )
    if (
        (math.floor(lowerCAmelCase_ ) != input_a)
        or (math.floor(lowerCAmelCase_ ) != input_a)
        or (math.floor(lowerCAmelCase_ ) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''' )
    if (input_a > 2) or (input_a > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''' )
    # build registers: qubits 0-1 are the inputs, 2 the carry-in, 3 an ancilla
    lowerCAmelCase_ : str = qiskit.QuantumRegister(4 , '''qr''' )
    lowerCAmelCase_ : str = qiskit.ClassicalRegister(2 , '''cr''' )
    # list the entries
    lowerCAmelCase_ : Any = [input_a, input_a, carry_in]
    lowerCAmelCase_ : int = qiskit.QuantumCircuit(lowerCAmelCase_ , lowerCAmelCase_ )
    # Prepare each input qubit: H for "2" (superposition), X for 1, identity for 0.
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(lowerCAmelCase_ ) # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(lowerCAmelCase_ ) # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(lowerCAmelCase_ ) # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , lowerCAmelCase_ ) # measure the last two qbits
    lowerCAmelCase_ : Tuple = qiskit.Aer.get_backend('''aer_simulator''' )
    lowerCAmelCase_ : Union[str, Any] = qiskit.execute(lowerCAmelCase_ , lowerCAmelCase_ , shots=1_000 )
    return job.result().get_counts(lowerCAmelCase_ )


if __name__ == "__main__":
    print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case__( UpperCAmelCase__ ):
    '''
    Combined CLIP processor wrapping a CLIPImageProcessor and a CLIP tokenizer
    into a single callable; also shims the deprecated ``feature_extractor``
    argument/properties onto ``image_processor``.

    NOTE(review): machine-mangled — assignments go to ``lowerCAmelCase_`` but
    ``feature_extractor``/``image_processor``/``encoding``/``image_features``
    are read under their original names; confirm against the upstream source.
    '''
    SCREAMING_SNAKE_CASE__ : Any = ["""image_processor""", """tokenizer"""]
    SCREAMING_SNAKE_CASE__ : List[str] = """CLIPImageProcessor"""
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
    def __init__( self , __lowercase=None , __lowercase=None , **__lowercase ) -> Dict:
        lowerCAmelCase_ : str = None
        # Backwards compatibility: accept the deprecated `feature_extractor` kwarg.
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , __lowercase , )
            lowerCAmelCase_ : str = kwargs.pop('''feature_extractor''' )
        lowerCAmelCase_ : str = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(__lowercase , __lowercase )
    def __call__( self , __lowercase=None , __lowercase=None , __lowercase=None , **__lowercase ) -> Tuple:
        # Tokenize text and/or preprocess images; when both are given, the image
        # pixel values are attached to the text encoding.
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            lowerCAmelCase_ : List[str] = self.tokenizer(__lowercase , return_tensors=__lowercase , **__lowercase )
        if images is not None:
            lowerCAmelCase_ : Any = self.image_processor(__lowercase , return_tensors=__lowercase , **__lowercase )
        if text is not None and images is not None:
            lowerCAmelCase_ : List[Any] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**__lowercase ) , tensor_type=__lowercase )
    def lowercase_ ( self , *__lowercase , **__lowercase ) -> str:
        # Delegates to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*__lowercase , **__lowercase )
    def lowercase_ ( self , *__lowercase , **__lowercase ) -> int:
        # Delegates to the tokenizer's decode.
        return self.tokenizer.decode(*__lowercase , **__lowercase )
    @property
    def lowercase_ ( self ) -> int:
        # Union of tokenizer and image-processor input names, de-duplicated in order.
        lowerCAmelCase_ : List[Any] = self.tokenizer.model_input_names
        lowerCAmelCase_ : Any = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def lowercase_ ( self ) -> str:
        # Deprecated alias for image_processor_class.
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowercase , )
        return self.image_processor_class
    @property
    def lowercase_ ( self ) -> str:
        # Deprecated alias for image_processor.
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __lowercase , )
        return self.image_processor
import re
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
    """
    Validate an Indian mobile phone number.

    Accepts an optional ``+91`` country code (optionally followed by a hyphen
    or space), an optional leading ``0`` or ``91``, then a 10-digit number
    starting with 7, 8 or 9.

    :param lowerCAmelCase_: the phone-number string to validate
    :return: True if the string matches the pattern, False otherwise
    """
    # Bug fix: the original rebound the parameter itself to the compiled
    # pattern (so the search received the pattern twice -> TypeError) and then
    # compared against an undefined name `phone` -> NameError.  Keep the
    # pattern in its own local instead.
    pattern = re.compile(r'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
    if match := re.search(pattern , lowerCAmelCase_ ):
        # The pattern is anchored, so a match implies the whole string matched.
        return match.string == lowerCAmelCase_
    return False


if __name__ == "__main__":
    # Bug fix: the guard previously called the undefined name
    # `indian_phone_validator` (NameError); call the function defined above.
    print(lowerCAmelCase("""+918827897895"""))
def lowerCAmelCase ( lowerCAmelCase_ = 1_000 )-> int:
    """
    Project Euler problem 1: return the sum of all natural numbers strictly
    below ``lowerCAmelCase_`` that are multiples of 3 or 5.

    :param lowerCAmelCase_: exclusive upper bound (default 1000)
    :return: the sum of qualifying multiples (0 when the bound is <= 3)
    """
    # range starts at 3 because it is the smallest multiple of 3 or 5.
    return sum(e for e in range(3 , lowerCAmelCase_ ) if e % 3 == 0 or e % 5 == 0 )


if __name__ == "__main__":
    # Bug fix: the guard previously called the undefined name `solution`
    # (NameError); call the function actually defined above.
    print(f"""{lowerCAmelCase() = }""")
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_UpperCAmelCase : Any =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
    '''
    ConvNeXT-style image processor: resizes with an optional crop-percentage
    trick for sizes below 384 (resize the shortest edge to size/crop_pct, then
    center-crop), rescales, and normalizes images.

    NOTE(review): machine-mangled — method signatures repeat the parameter
    name ``__lowercase`` (a SyntaxError) and instance attributes are assigned
    to bare ``lowerCAmelCase_`` locals instead of ``self.<attr>``; confirm
    every detail against the upstream source.
    '''
    SCREAMING_SNAKE_CASE__ : Dict = ["""pixel_values"""]
    def __init__( self , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = 1 / 2_5_5 , __lowercase = True , __lowercase = None , __lowercase = None , **__lowercase , ) -> None:
        super().__init__(**__lowercase )
        lowerCAmelCase_ : Dict = size if size is not None else {'''shortest_edge''': 3_8_4}
        lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
        lowerCAmelCase_ : List[Any] = do_resize
        lowerCAmelCase_ : Optional[int] = size
        # Default value set here for backwards compatibility where the value in config is None
        lowerCAmelCase_ : str = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
        lowerCAmelCase_ : Tuple = resample
        lowerCAmelCase_ : Optional[int] = do_rescale
        lowerCAmelCase_ : Any = rescale_factor
        lowerCAmelCase_ : List[str] = do_normalize
        lowerCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        lowerCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = PILImageResampling.BICUBIC , __lowercase = None , **__lowercase , ) -> np.ndarray:
        # Resize: below 384, upscale the shortest edge by 1/crop_pct then
        # center-crop back to (shortest_edge, shortest_edge); at 384 or larger,
        # warp directly to a square without cropping.
        lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
        if "shortest_edge" not in size:
            raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
        lowerCAmelCase_ : Optional[int] = size['''shortest_edge''']
        if shortest_edge < 3_8_4:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            lowerCAmelCase_ : Optional[Any] = int(shortest_edge / crop_pct )
            lowerCAmelCase_ : List[str] = get_resize_output_image_size(__lowercase , size=__lowercase , default_to_square=__lowercase )
            lowerCAmelCase_ : List[Any] = resize(image=__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=__lowercase , size=(shortest_edge, shortest_edge) , data_format=__lowercase , **__lowercase )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                __lowercase , size=(shortest_edge, shortest_edge) , resample=__lowercase , data_format=__lowercase , **__lowercase )
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> Any:
        # Multiply pixel values by `scale` (typically 1/255).
        return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
    def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray:
        # Channel-wise (x - mean) / std normalization.
        return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
    def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> PIL.Image.Image:
        # Full preprocessing entry point: resolve per-call overrides against the
        # instance defaults, validate, then resize -> rescale -> normalize.
        lowerCAmelCase_ : Optional[int] = do_resize if do_resize is not None else self.do_resize
        lowerCAmelCase_ : Any = crop_pct if crop_pct is not None else self.crop_pct
        lowerCAmelCase_ : str = resample if resample is not None else self.resample
        lowerCAmelCase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
        lowerCAmelCase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowerCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize
        lowerCAmelCase_ : str = image_mean if image_mean is not None else self.image_mean
        lowerCAmelCase_ : int = image_std if image_std is not None else self.image_std
        lowerCAmelCase_ : int = size if size is not None else self.size
        lowerCAmelCase_ : List[str] = get_size_dict(__lowercase , default_to_square=__lowercase )
        lowerCAmelCase_ : Tuple = make_list_of_images(__lowercase )
        if not valid_images(__lowercase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # NOTE(review): this condition binds as
        # (do_resize and size is None) or (resample is None) — per the message
        # it was likely meant as do_resize and (size is None or resample is None);
        # confirm against upstream before changing.
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
            raise ValueError('''crop_pct must be specified if size < 384.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        lowerCAmelCase_ : Optional[Any] = [to_numpy_array(__lowercase ) for image in images]
        if do_resize:
            lowerCAmelCase_ : Union[str, Any] = [self.resize(image=__lowercase , size=__lowercase , crop_pct=__lowercase , resample=__lowercase ) for image in images]
        if do_rescale:
            lowerCAmelCase_ : Any = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
        if do_normalize:
            lowerCAmelCase_ : List[Any] = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
        lowerCAmelCase_ : Optional[Any] = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
        lowerCAmelCase_ : Dict = {'''pixel_values''': images}
        return BatchFeature(data=__lowercase , tensor_type=__lowercase )
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys

# NOTE(review): the assignment targets below were mangled to `_UpperCAmelCase`,
# but later lines read `fork_point_sha`, `joined_dirs`, `regex`,
# `modified_files` and `relevant_modified_files` — the script will raise
# NameError as-is; confirm against the upstream source.
# SHA of the commit where the current branch forked off `main`.
_UpperCAmelCase : Dict =subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
# All files changed since the fork point.
_UpperCAmelCase : Any =subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode("""utf-8""").split()
# Alternation of the requested top-level directories (CLI arguments).
_UpperCAmelCase : Optional[int] ="""|""".join(sys.argv[1:])
# Match .py files directly under any of the requested directories.
_UpperCAmelCase : str =re.compile(Rf"""^({joined_dirs}).*?\.py$""")
_UpperCAmelCase : List[Any] =[x for x in modified_files if regex.match(x)]
# No trailing newline: the output is consumed by Makefile commands.
print(""" """.join(relevant_modified_files), end="""""")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the map from checkpoint names to hub config URLs.
# NOTE(review): both targets are mangled to `_UpperCAmelCase`; the class below
# presumably reads them under canonical names — confirm upstream.
_UpperCAmelCase : Optional[int] =logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] ={
    """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class snake_case__( UpperCAmelCase__ ):
    '''
    Configuration class for GPT-NeoX Japanese models (model_type
    ``gpt_neox_japanese``), holding architecture hyperparameters such as
    vocabulary size, hidden size, layer/head counts and rotary-embedding
    settings.

    NOTE(review): machine-mangled — the ``__init__`` signature repeats the
    parameter name ``__lowercase`` (a SyntaxError) and attributes are assigned
    to bare ``lowerCAmelCase_`` locals instead of ``self.<attr>``; the defaults
    in the signature (32000 vocab, 2560 hidden, 32 layers/heads, ...) appear
    positional — confirm their pairing with the names against upstream.
    '''
    SCREAMING_SNAKE_CASE__ : str = """gpt_neox_japanese"""
    def __init__( self , __lowercase=3_2_0_0_0 , __lowercase=2_5_6_0 , __lowercase=3_2 , __lowercase=3_2 , __lowercase=4 , __lowercase="gelu" , __lowercase=1.00 , __lowercase=1_0_0_0_0 , __lowercase=2_0_4_8 , __lowercase=0.02 , __lowercase=1e-5 , __lowercase=True , __lowercase=3_1_9_9_6 , __lowercase=3_1_9_9_9 , __lowercase=0.1 , __lowercase=0.0 , **__lowercase , ) -> str:
        super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
        lowerCAmelCase_ : Optional[Any] = vocab_size
        lowerCAmelCase_ : Tuple = max_position_embeddings
        lowerCAmelCase_ : Optional[Any] = hidden_size
        lowerCAmelCase_ : Optional[Any] = num_hidden_layers
        lowerCAmelCase_ : str = num_attention_heads
        # MLP width is expressed as a multiple of the hidden size.
        lowerCAmelCase_ : str = intermediate_multiple_size
        lowerCAmelCase_ : str = hidden_act
        # Fraction of head dimensions receiving rotary embeddings, and the base.
        lowerCAmelCase_ : Dict = rotary_pct
        lowerCAmelCase_ : Union[str, Any] = rotary_emb_base
        lowerCAmelCase_ : int = initializer_range
        lowerCAmelCase_ : Any = layer_norm_eps
        lowerCAmelCase_ : Optional[Any] = use_cache
        lowerCAmelCase_ : Tuple = attention_dropout
        lowerCAmelCase_ : Dict = hidden_dropout
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_UpperCAmelCase : List[Any] =logging.get_logger(__name__)
_UpperCAmelCase : List[str] ={"""vocab_file""": """spiece.model"""}
_UpperCAmelCase : List[Any] ={
"""vocab_file""": {
"""TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""",
}
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<unk>" , __lowercase="<sep>" , __lowercase="<pad>" , __lowercase="<cls>" , __lowercase="<mask>" , __lowercase=["<eop>", "<eod>"] , __lowercase = None , **__lowercase , ) -> None:
lowerCAmelCase_ : str = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
lowerCAmelCase_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , )
lowerCAmelCase_ : str = 3
lowerCAmelCase_ : Tuple = do_lower_case
lowerCAmelCase_ : Any = remove_space
lowerCAmelCase_ : Optional[int] = keep_accents
lowerCAmelCase_ : Any = vocab_file
lowerCAmelCase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowercase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''' )
lowerCAmelCase_ : Optional[int] = jieba
lowerCAmelCase_ : Any = str.maketrans(''' \n''' , '''\u2582\u2583''' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def lowercase_ ( self ) -> List[Any]:
return len(self.sp_model )
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : Tuple = {self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[str]:
lowerCAmelCase_ : Union[str, Any] = self.__dict__.copy()
lowerCAmelCase_ : List[str] = None
return state
def __setstate__( self , __lowercase ) -> Tuple:
lowerCAmelCase_ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase_ : List[str] = {}
lowerCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase_ ( self , __lowercase ) -> Optional[int]:
if self.remove_space:
lowerCAmelCase_ : str = ''' '''.join(inputs.strip().split() )
else:
lowerCAmelCase_ : Optional[Any] = inputs
lowerCAmelCase_ : Dict = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
lowerCAmelCase_ : List[Any] = unicodedata.normalize('''NFKD''' , __lowercase )
lowerCAmelCase_ : List[Any] = ''''''.join([c for c in outputs if not unicodedata.combining(__lowercase )] )
if self.do_lower_case:
lowerCAmelCase_ : Dict = outputs.lower()
return outputs
def lowercase_ ( self , __lowercase ) -> List[str]:
lowerCAmelCase_ : List[str] = self.preprocess_text(__lowercase )
lowerCAmelCase_ : Optional[Any] = self.sp_model.encode(__lowercase , out_type=__lowercase )
lowerCAmelCase_ : List[str] = []
for piece in pieces:
if len(__lowercase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
lowerCAmelCase_ : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowercase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCAmelCase_ : Optional[int] = cur_pieces[1:]
else:
lowerCAmelCase_ : List[str] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__lowercase )
else:
new_pieces.append(__lowercase )
return new_pieces
def lowercase_ ( self , __lowercase ) -> int:
return self.sp_model.PieceToId(__lowercase )
def lowercase_ ( self , __lowercase ) -> int:
return self.sp_model.IdToPiece(__lowercase )
def convert_tokens_to_string( self , __lowercase ) -> str:
    """Join sub-token pieces back into a plain string.

    SentencePiece marks word starts with the underline glyph; it is converted
    back to a space here.
    """
    # NOTE(review): assumes the module-level SPIECE_UNDERLINE constant
    # ("\u2581") is defined in this file — confirm.
    out_string = ''.join(__lowercase ).replace(SPIECE_UNDERLINE , ' ' ).strip()
    return out_string
def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
    """Append special tokens (XLNet layout — trailing separator/classifier).

    Single sequence:  ``X <sep> <cls>``
    Pair:             ``A <sep> B <sep> <cls>``
    """
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return token_ids_0 + sep + cls
    return token_ids_0 + sep + token_ids_1 + sep + cls
def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
    """Return a mask with 1 at special-token positions and 0 elsewhere.

    Matches the XLNet-style layout of ``build_inputs_with_special_tokens``:
    the trailing ``<sep> <cls>`` (and the inner ``<sep>`` for pairs) are 1s.
    """
    if already_has_special_tokens:
        # Input already carries special tokens — defer to the base class.
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
    if token_ids_1 is not None:
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
    return ([0] * len(token_ids_0 )) + [1, 1]
def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
    """Create token-type ids (XLNet layout: the trailing ``<cls>`` gets segment id 2)."""
    sep = [self.sep_token_id]
    cls_segment_id = [2]
    if token_ids_1 is None:
        return len(token_ids_0 + sep ) * [0] + cls_segment_id
    return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
    """Write the SentencePiece model file into ``save_directory``.

    Copies the on-disk model when one exists; otherwise serialises the
    in-memory model. Returns a 1-tuple with the written path, or ``None``
    (implicitly) when ``save_directory`` is not a directory.
    """
    if not os.path.isdir(save_directory ):
        logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
        return
    out_vocab_file = os.path.join(
        save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
    if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
        copyfile(self.vocab_file , out_vocab_file )
    elif not os.path.isfile(self.vocab_file ):
        # No file on disk: dump the in-memory SentencePiece model instead.
        with open(out_vocab_file , 'wb' ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
    return (out_vocab_file,)
def _decode( self , *args , **kwargs ) -> str:
    """Decode ids to text, then restore spaces/newlines from their placeholder glyphs.

    The CPM-style vocabulary encodes a space as U+2582 and a newline as
    U+2583; plain spaces inserted by the base decoder are stripped first.
    """
    text = super()._decode(*args , **kwargs )
    text = text.replace(' ' , '' ).replace('\u2582' , ' ' ).replace('\u2583' , '\n' )
    return text
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class snake_case__:
    """Builds a tiny MaskFormer configuration plus random inputs for the unit tests.

    NOTE(review): the original file defined every method under the same mangled
    name (``lowercase_``), so later definitions shadowed earlier ones; the
    methods now carry the unique names the sibling test class actually calls.
    """

    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=1_0 , num_channels=3 , min_size=3_2 * 4 , max_size=3_2 * 6 , num_labels=4 , mask_feature_size=3_2 , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs( self ):
        """Return ``(config, pixel_values, pixel_mask, mask_labels, class_labels)`` with random data."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
        # Random binary masks: thresholding uniform noise at 0.5 gives ~half ones.
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config( self ):
        """Tiny Swin backbone + tiny DETR decoder configuration."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )

    def prepare_config_and_inputs_for_common( self ):
        """Return ``(config, inputs_dict)`` as expected by the common ModelTesterMixin tests."""
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state( self , output , config ):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        # NOTE(review): assertTrue(len(x), msg) only checks that len(x) is
        # truthy — kept as upstream wrote it.
        self.parent.assertTrue(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(transformer_decoder_hidden_states ) , config.decoder_config.decoder_layers )

    def create_and_check_maskformer_model( self , config , pixel_values , pixel_mask , output_hidden_states=False ):
        with torch.no_grad():
            model = MaskFormerModel(config=config )
            model.to(torch_device )
            model.eval()
            output = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            output = model(pixel_values , output_hidden_states=True )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(output , config )

    def create_and_check_maskformer_instance_segmentation_head_model( self , config , pixel_values , pixel_mask , mask_labels , class_labels ):
        model = MaskFormerForInstanceSegmentation(config=config )
        model.to(torch_device )
        model.eval()

        def comm_check_on_output(result ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            result = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            result = model(pixel_values )
            comm_check_on_output(result )
            result = model(
                pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels )
        comm_check_on_output(result )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    """Common-API tests for MaskFormer (ModelTesterMixin + PipelineTesterMixin).

    NOTE(review): the mixin configuration attributes and the ``test_*`` method
    names were mangled in the original file (every method was ``lowercase_``,
    so nothing was discovered by unittest); they have been restored to the
    names the mixins require. The class it instantiates is referenced as
    ``MaskFormerModelTester`` — confirm that name resolves at module level.
    """

    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp( self ) -> None:
        self.model_tester = MaskFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerConfig , has_text_modality=False )

    def test_config( self ) -> None:
        self.config_tester.run_common_tests()

    def test_maskformer_model( self ) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config , **inputs_dict , output_hidden_states=False )

    def test_maskformer_instance_segmentation_head_model( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs )

    @unittest.skip(reason='MaskFormer does not use inputs_embeds' )
    def test_inputs_embeds( self ) -> None:
        pass

    @unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
    def test_model_common_attributes( self ) -> None:
        pass

    @unittest.skip(reason='MaskFormer is not a generative model' )
    def test_generate_without_input_ids( self ) -> None:
        pass

    @unittest.skip(reason='MaskFormer does not use token embeddings' )
    def test_resize_tokens_embeddings( self ) -> None:
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
    def test_multi_gpu_data_parallel_forward( self ) -> None:
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small( self ) -> None:
        pass

    def test_forward_signature( self ) -> None:
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    @slow
    def test_model_from_pretrained( self ) -> None:
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    def test_model_with_labels( self ) -> None:
        size = (self.model_tester.min_size,) * 2
        inputs = {
            'pixel_values': torch.randn((2, 3, *size) , device=torch_device ),
            'mask_labels': torch.randn((2, 1_0, *size) , device=torch_device ),
            'class_labels': torch.zeros(2 , 1_0 , device=torch_device ).long(),
        }
        model = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(torch_device )
        outputs = model(**inputs )
        self.assertTrue(outputs.loss is not None )

    def test_hidden_states_output( self ) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config , **inputs_dict , output_hidden_states=True )

    def test_attention_outputs( self ) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config ).to(torch_device )
            outputs = model(**inputs_dict , output_attentions=True )
            self.assertTrue(outputs.attentions is not None )

    def test_training( self ) -> None:
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config )
        model.to(torch_device )
        model.train()
        loss = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels ).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions( self ) -> None:
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config )
        model.to(torch_device )
        model.train()
        outputs = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels )
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
# Absolute tolerance used by the integration tests' torch.allclose checks below.
_UpperCAmelCase : float =1E-4
def lowerCAmelCase ( )-> Any:
    """Load the standard COCO cats fixture image used by the integration tests."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_vision
@slow
class snake_case__( unittest.TestCase ):
    """Slow integration tests that run released MaskFormer checkpoints end-to-end.

    NOTE(review): module-level helper names in this file are mangled; the
    ``prepare_img`` helper referenced below is defined under a different name
    at module scope — confirm it resolves before running.
    """

    @cached_property
    def default_image_processor( self ):
        # The image processor is only importable when the vision extras are installed.
        return (
            MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' )
            if is_vision_available()
            else None
        )

    def test_inference_no_head( self ) -> None:
        model = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors='pt' ).to(torch_device )
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_slice_hidden_state = torch.tensor(
            [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=_UpperCAmelCase ) )
        expected_slice_hidden_state = torch.tensor(
            [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=_UpperCAmelCase ) )
        expected_slice_hidden_state = torch.tensor(
            [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , expected_slice_hidden_state , atol=_UpperCAmelCase ) )

    def test_inference_instance_segmentation_head( self ) -> None:
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
            .to(torch_device )
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors='pt' ).to(torch_device )
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            outputs = model(**inputs )
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        expected_slice = [
            [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
            [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
            [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
        ]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , expected_slice , atol=_UpperCAmelCase ) )
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        expected_slice = torch.tensor(
            [
                [1.6_512e00, -5.2_572e00, -3.3_519e00],
                [3.6_169e-02, -5.9_025e00, -2.9_313e00],
                [1.0_766e-04, -7.7_630e00, -5.1_263e00],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_slice , atol=_UpperCAmelCase ) )

    def test_inference_instance_segmentation_head_resnet_backbone( self ) -> None:
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' )
            .to(torch_device )
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors='pt' ).to(torch_device )
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            outputs = model(**inputs )
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        expected_slice = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , expected_slice , atol=_UpperCAmelCase ) )
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        expected_slice = torch.tensor(
            [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_slice , atol=_UpperCAmelCase ) )

    def test_with_segmentation_maps_and_loss( self ) -> None:
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
            .to(torch_device )
            .eval()
        )
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.float32 ), np.zeros((3_8_4, 3_8_4) ).astype(np.float32 )] , return_tensors='pt' , )
        pixel_values = inputs['pixel_values'].to(torch_device )
        mask_labels = [el.to(torch_device ) for el in inputs['mask_labels']]
        class_labels = [el.to(torch_device ) for el in inputs['class_labels']]
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values , mask_labels=mask_labels , class_labels=class_labels )
        self.assertTrue(outputs.loss is not None )
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
_UpperCAmelCase : Tuple =None
# The class below references these module constants by their conventional
# names; the original file bound every one of them to the same mangled name
# (`_UpperCAmelCase`), each assignment shadowing the previous.
logger = logging.get_logger(__name__)

# File names expected inside a saved tokenizer directory.
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

# Remote locations of the pretrained vocabulary / tokenizer files.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
        'google/bigbird-roberta-large': (
            'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
        ),
        'google/bigbird-base-trivia-itc': (
            'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
        ),
    },
    'tokenizer_file': {
        'google/bigbird-roberta-base': (
            'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
        ),
        'google/bigbird-roberta-large': (
            'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
        ),
        'google/bigbird-base-trivia-itc': (
            'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
        ),
    },
}

# Maximum sequence length each checkpoint was trained with.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/bigbird-roberta-base': 4096,
    'google/bigbird-roberta-large': 4096,
    'google/bigbird-base-trivia-itc': 4096,
}

# SentencePiece's word-boundary marker glyph.
SPIECE_UNDERLINE = '▁'
class snake_case__( UpperCAmelCase__ ):
    """Fast BigBird tokenizer backed by the HuggingFace *tokenizers* library.

    NOTE(review): the class attributes and overriding methods must carry the
    names the ``PreTrainedTokenizerFast`` base class looks up; the mangled
    names in the original file silently disabled every override.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []

    def __init__( self , vocab_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , sep_token="[SEP]" , mask_token="[MASK]" , cls_token="[CLS]" , **kwargs , ) -> None:
        # Wrap plain strings in AddedToken so stripping behaviour is explicit.
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        # A slow tokenizer can only be reconstructed when the SP model file is known.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """``[CLS] A [SEP]`` for a single sequence, ``[CLS] A [SEP] B [SEP]`` for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """Segment ids: 0 for ``[CLS] A [SEP]``, 1 for ``B [SEP]`` when a pair is given."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """Copy the SentencePiece model file into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class snake_case__( UpperCAmelCase__ ):
    """Task template describing an automatic-speech-recognition dataset.

    Maps a dataset's audio column to the ``audio`` input feature and its
    transcription column to the ``transcription`` label feature.
    """

    # `task` is serialised even when left at its default value.
    task: str = field(default='automatic-speech-recognition' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'audio': Audio()} )
    label_schema: ClassVar[Features] = Features({'transcription': Value('string' )} )
    audio_column: str = 'audio'
    transcription_column: str = 'transcription'

    def align_with_features( self , features ):
        """Return a copy of this template whose input schema uses the dataset's Audio feature.

        Raises:
            ValueError: if ``self.audio_column`` is missing or not an Audio feature.
        """
        if self.audio_column not in features:
            raise ValueError(f"""Column {self.audio_column} is not present in features.""" )
        if not isinstance(features[self.audio_column] , Audio ):
            raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema['audio'] = features[self.audio_column]
        # The dataclass is frozen, so bypass __setattr__ via __dict__.
        task_template.__dict__['input_schema'] = input_schema
        return task_template

    @property
    def column_mapping( self ) -> Dict[str, str]:
        """Mapping from dataset column names to the template's canonical feature names."""
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
    """Return True when every tensor in the list shares the same shape.

    An empty or single-element list trivially passes, because ``all`` over an
    empty generator is True.
    """
    shapes = [tensor.shape for tensor in lowerCAmelCase_]
    return all(shape == shapes[0] for shape in shapes[1:] )
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    """Fast tests for StableDiffusionLatentUpscalePipeline.

    NOTE(review): the pipeline-tester mixin attributes and ``test_*`` method
    names were mangled in the original file (nothing was discovered or read by
    the mixins); they have been restored to the names the mixins require.
    """

    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        'height',
        'width',
        'cross_attention_kwargs',
        'negative_prompt_embeds',
        'prompt_embeds',
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    test_cpu_offload = True

    @property
    def dummy_image( self ):
        """A tiny deterministic latent-space image (1x4x16x16)."""
        batch_size = 1
        num_channels = 4
        sizes = (1_6, 1_6)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image

    def get_dummy_components( self ):
        """Build the minimal set of pipeline components with fixed seeds."""
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
            act_fn='gelu' , attention_head_dim=8 , norm_num_groups=None , block_out_channels=[3_2, 3_2, 6_4, 6_4] , time_cond_proj_dim=1_6_0 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=3_2 , down_block_types=(
                'KDownBlock2D',
                'KCrossAttnDownBlock2D',
                'KCrossAttnDownBlock2D',
                'KCrossAttnDownBlock2D',
            ) , in_channels=8 , mid_block_type=None , only_cross_attention=False , out_channels=5 , resnet_time_scale_shift='scale_shift' , time_embedding_type='fourier' , timestep_post_act='gelu' , up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') , )
        vae = AutoencoderKL(
            block_out_channels=[3_2, 3_2, 6_4, 6_4] , in_channels=3 , out_channels=3 , down_block_types=[
                'DownEncoderBlock2D',
                'DownEncoderBlock2D',
                'DownEncoderBlock2D',
                'DownEncoderBlock2D',
            ] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        scheduler = EulerDiscreteScheduler(prediction_type='sample' )
        text_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='quick_gelu' , projection_dim=5_1_2 , )
        text_encoder = CLIPTextModel(text_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': model.eval(),
            'vae': vae.eval(),
            'scheduler': scheduler,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components

    def get_dummy_inputs( self , device , seed=0 ):
        """Deterministic call kwargs for the pipeline (mps lacks per-device generators)."""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': self.dummy_image.cpu(),
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def test_inference( self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 2_5_6, 2_5_6, 3) )
        expected_slice = np.array(
            [0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )

    def test_attention_slicing_forward_pass( self ):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )

    def test_cpu_offload_forward_pass( self ):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )

    def test_dict_tuple_outputs_equivalent( self ):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )

    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3 )

    def test_pt_np_pil_outputs_equivalent( self ):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )

    def test_save_load_local( self ):
        super().test_save_load_local(expected_max_difference=3e-3 )

    def test_save_load_optional_components( self ):
        super().test_save_load_optional_components(expected_max_difference=3e-3 )

    def test_karras_schedulers_shape( self ):
        skip_schedulers = [
            'DDIMScheduler',
            'DDPMScheduler',
            'PNDMScheduler',
            'HeunDiscreteScheduler',
            'EulerAncestralDiscreteScheduler',
            'KDPM2DiscreteScheduler',
            'KDPM2AncestralDiscreteScheduler',
            'DPMSolverSDEScheduler',
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            scheduler_cls = getattr(diffusers , scheduler_enum.name )
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config )
            output = pipe(**inputs )[0]
            outputs.append(output )
        assert check_same_shape(outputs )
@require_torch_gpu
@slow
class snake_case__( unittest.TestCase ):
    """Slow GPU integration tests for the SD x2 latent upscaler.

    Fixes in this restoration: the three methods previously shared one name
    (so only the last was ever defined), `torch.floataa` (nonexistent
    attribute) is `torch.float16`, and the undefined prompt/generator locals
    are rebound. Pipeline classes and helpers are imported locally so the
    module-level import list stays unchanged.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        from diffusers import StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline
        import numpy as np

        generator = torch.manual_seed(33)
        pipe = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''', torch_dtype=torch.float16)
        pipe.to('''cuda''')
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''', torch_dtype=torch.float16)
        upscaler.to('''cuda''')
        prompt = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
        low_res_latents = pipe(prompt, generator=generator, output_type='''latent''').images
        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type='''np''',
        ).images[0]
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''')
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        from diffusers import StableDiffusionLatentUpscalePipeline
        from diffusers.utils import load_image
        import numpy as np

        generator = torch.manual_seed(33)
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''', torch_dtype=torch.float16)
        upscaler.to('''cuda''')
        prompt = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
        image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''')
        upscaled_image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type='''np''',
        ).images[0]
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''')
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
# Shared parameter sets for the pipeline test mixins. Every binding below was
# collapsed onto a single name in the mangled source (losing all but the last
# value); the distinct names are restored following the diffusers
# `pipeline_params` convention — the test classes above import e.g.
# TEXT_TO_IMAGE_PARAMS / TEXT_TO_IMAGE_BATCH_PARAMS from this module.

TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        """prompt""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["""prompt""", """negative_prompt"""])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["""image"""])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["""image"""])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        """prompt""",
        """image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["""prompt""", """image""", """negative_prompt"""])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        """prompt""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["""image""", """mask_image"""])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        """example_image""",
        """image""",
        """mask_image""",
        """height""",
        """width""",
        """guidance_scale""",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["""example_image""", """image""", """mask_image"""])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["""class_labels"""])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["""class_labels"""])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["""batch_size"""])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["""batch_size"""])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        """prompt""",
        """audio_length_in_s""",
        """guidance_scale""",
        """negative_prompt""",
        """prompt_embeds""",
        """negative_prompt_embeds""",
        """cross_attention_kwargs""",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["""prompt""", """negative_prompt"""])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["""input_tokens"""])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["""input_tokens"""])
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint -> config-url map, restored to the conventional HF name
# (<MODEL>_PRETRAINED_CONFIG_ARCHIVE_MAP); the mangled source bound both the
# logger and this map to the same throwaway identifier.
GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
        """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
    ),
}
class snake_case__( PretrainedConfig ):
    """Configuration for a GPTSAN-japanese model.

    Defaults describe the `tanreinama/GPTSAN-2.8B-spout_is_uniform`
    architecture. The mangled source gave every `__init__` parameter the same
    name (a SyntaxError) and an undefined base class; parameter names are
    restored from the attribute assignments in the body, and the base is the
    `PretrainedConfig` imported at the top of this module.
    """

    model_type = """gptsan-japanese"""
    keys_to_ignore_at_inference = [
        """past_key_values""",
    ]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="""float32""",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        # Total layer count is the sum of switch and extension layers.
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
def lowerCAmelCase(limit=1_000_000) -> int:
    """Project Euler 14: return the start value below `limit` whose Collatz
    chain is longest.

    Chain lengths are memoized in `counters` (the starting term itself is
    counted, so `counters[1] == 1`). The mangled source collapsed all locals
    onto one name, making `range(2, <dict>)` a TypeError; the distinct locals
    are restored here.
    """
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for start in range(2, limit):
        counter = 0
        number = start
        while True:
            if number in counters:
                # Known tail: reuse the memoized chain length and stop.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if start not in counters:
            counters[start] = counter
        if counter > pre_counter:
            largest_number = start
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    # The guard previously called an undefined name; it now calls the
    # function actually defined above, reading the limit from stdin.
    print(lowerCAmelCase(int(input().strip())))
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import table for the ViT sub-package. The mangled source rebound every
# update to a throwaway name; restored: all optional-backend branches extend
# `_import_structure`, and the lazy module is installed into `sys.modules`.
_import_structure = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""feature_extraction_vit"""] = ["""ViTFeatureExtractor"""]
    _import_structure["""image_processing_vit"""] = ["""ViTImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_vit"""] = [
        """VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ViTForImageClassification""",
        """ViTForMaskedImageModeling""",
        """ViTModel""",
        """ViTPreTrainedModel""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_tf_vit"""] = [
        """TFViTForImageClassification""",
        """TFViTModel""",
        """TFViTPreTrainedModel""",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_flax_vit"""] = [
        """FlaxViTForImageClassification""",
        """FlaxViTModel""",
        """FlaxViTPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the eager imports; at runtime the lazy module
    # below resolves them on first attribute access.
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : str =logging.get_logger(__name__)
class snake_case__( PretrainedConfig ):
    """Composite encoder-decoder model configuration.

    Holds one `AutoConfig` per side under `self.encoder` / `self.decoder`.
    The mangled source dropped these attribute assignments and referenced
    undefined names; they are restored here, along with the base class
    (`PretrainedConfig`, imported at the top of this module) and the
    conventional attribute/method names that the body itself references
    (e.g. `self.__class__.model_type` in `to_dict`).
    """

    model_type = """encoder-decoder"""
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''')
        encoder_model_type = encoder_config.pop('''model_type''')
        decoder_config = kwargs.pop('''decoder''')
        decoder_model_type = decoder_config.pop('''model_type''')

        # Imported lazily to avoid a circular import with the auto module.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        """Build a composite config from two standalone configs, forcing the
        decoder into cross-attention mode."""
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_UpperCAmelCase : Dict =logging.get_logger(__name__)
@dataclass
class snake_case__:
    """bitsandbytes quantization settings (8-bit LLM.int8 or 4-bit fp4/nf4).

    Restorations from the mangled source: the `__init__` parameters were all
    given one duplicate name, the distinct `load_in_8bit` / `load_in_4bit`
    flags had collapsed into a single attribute, every method shared one name
    (so `self.post_init()` could not resolve), and `to_diff_dict` referenced a
    class name that does not exist in this file (now `self.__class__`).
    """

    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="""fp4""",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        # Compute dtype may be given as None (default fp32), a string name, or a torch.dtype.
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''')
        self.post_init()

    def post_init(self):
        """Validate field types; 4-bit mode additionally requires bitsandbytes>=0.39.0."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError('''llm_int8_threshold must be a float''')
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError('''llm_int8_skip_modules must be a list of strings''')
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''')
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError('''llm_int8_has_fp16_weight must be a boolean''')
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''')
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError('''bnb_4bit_quant_type must be a string''')
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError('''bnb_4bit_use_double_quant must be a boolean''')
        if self.load_in_4bit and not version.parse(importlib.metadata.version('''bitsandbytes''')) >= version.parse(
            '''0.39.0'''):
            raise ValueError(
                '''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''')

    def is_quantizable(self):
        """True when either quantization mode is enabled."""
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        """Return 'llm_int8', 'fp4', 'nf4', or None when not quantized."""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        """Instantiate from a dict; extra kwargs override matching attributes."""
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path):
        """Write the full serialized config to `json_file_path`."""
        with open(json_file_path, '''w''', encoding='''utf-8''') as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + '''\n'''
            writer.write(json_string)

    def to_dict(self):
        """Serialize; the torch dtype is rendered as its short name (e.g. 'float32')."""
        output = copy.deepcopy(self.__dict__)
        output['''bnb_4bit_compute_dtype'''] = str(output['''bnb_4bit_compute_dtype''']).split('''.''')[1]
        return output

    def __repr__(self):
        return f"""{self.__class__.__name__} {self.to_json_string()}"""

    def to_json_string(self, use_diff=True):
        """JSON form; by default only fields differing from the defaults."""
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self):
        """Return only the fields that differ from a default-constructed config."""
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = self.__class__().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
from __future__ import annotations
from math import pi
def lowerCAmelCase(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Solve X_L = 2*pi*f*L for whichever of the three quantities is 0.

    Exactly one argument must be 0; that quantity is computed from the other
    two and returned as a single-entry dict. (Parameter names restored: the
    mangled signature repeated one name — a SyntaxError — while the body
    already used `inductance`/`frequency`/`reactance`.)

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is negative.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError('''One and only one argument must be 0''')
    if inductance < 0:
        raise ValueError('''Inductance cannot be negative''')
    if frequency < 0:
        raise ValueError('''Frequency cannot be negative''')
    if reactance < 0:
        raise ValueError('''Inductive reactance cannot be negative''')
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError('''Exactly one argument must be 0''')
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
from __future__ import annotations
class Matrix:
    """A real-valued matrix stored as a list of row lists (ints/floats).

    Renamed from the mangled `snake_case__`: the methods below construct and
    reference `Matrix` at runtime (e.g. `return Matrix(values)`,
    `isinstance(other, Matrix)`), so the class must carry that name. Local
    and parameter names are restored from the references the mangled body
    left intact (`rows`, `cols`, `error`, `determinant`, ...).
    """

    def __init__(self, rows) -> None:
        error = TypeError(
            '''Matrices must be formed from a list of zero or more lists containing at '''
            '''least one and the same number of values, each of which must be of type '''
            '''int or float.'''
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            # Every row must match the first row's width and hold only numbers.
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> list[list[int]]:
        """Return the matrix transposed as a plain list of column lists."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        """(rows, columns) pair."""
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        """Identity matrix of the same row count."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        # Non-square matrices are given determinant 0 by convention here.
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            # Laplace expansion along the first row.
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        """Determinant of the submatrix with `row` and `column` removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        """Signed minor at (row, column)."""
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        """Transpose of the cofactor matrix."""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError('''Only matrices with a non-zero determinant have an inverse''')
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            # NOTE(review): this joins the characters of str(row) — kept as in
            # the original; confirm the intended single-row rendering.
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    '''[''' + '''. '''.join([str(value) for value in row]) + '''.]'''
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row: list[int], position: int | None = None) -> None:
        """Append `row`, or insert it at `position` when given."""
        type_error = TypeError('''Row must be a list containing all ints and/or floats''')
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                '''Row must be equal in length to the other rows in the matrix''')
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        """Append `column`, or insert it at `position` when given."""
        type_error = TypeError(
            '''Column must be a list containing all ints and/or floats''')
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                '''Column must be equal in length to the other columns in the matrix''')
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other) -> Matrix:
        if self.order != other.order:
            raise ValueError('''Addition requires matrices of the same order''')
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other) -> Matrix:
        if self.order != other.order:
            raise ValueError('''Subtraction requires matrices of the same order''')
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other) -> Matrix:
        if isinstance(other, (int, float)):
            # NOTE(review): int() truncates scalar products — kept from the original.
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    '''The number of columns in the first matrix must '''
                    '''be equal to the number of rows in the second''')
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                '''A Matrix can only be multiplied by an int, float, or another matrix''')

    def __pow__(self, other) -> Matrix:
        if not isinstance(other, int):
            raise TypeError('''A Matrix can only be raised to the power of an int''')
        if not self.is_square:
            raise ValueError('''Only square matrices can be raised to a power''')
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                '''Only invertable matrices can be raised to a negative power''')
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


# Backward-compatible alias for the mangled class name.
snake_case__ = Matrix
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
class snake_case__( Enum ):
    """Named learning-rate schedule types understood by the factory below.

    Member names restored from their string values (the mangled source bound
    every member to one identifier, keeping only the last); the base is the
    `Enum` imported at the top of this module.
    """

    LINEAR = """linear"""
    COSINE = """cosine"""
    COSINE_WITH_RESTARTS = """cosine_with_restarts"""
    POLYNOMIAL = """polynomial"""
    CONSTANT = """constant"""
    CONSTANT_WITH_WARMUP = """constant_with_warmup"""
    PIECEWISE_CONSTANT = """piecewise_constant"""


# Public alias: the rest of this module refers to the enum as `SchedulerType`.
SchedulerType = snake_case__
def get_constant_schedule(optimizer, last_epoch=-1):
    """LR schedule that keeps the optimizer's base learning rate constant.

    Name restored from the `TYPE_TO_SCHEDULER_FUNCTION` dispatch table below,
    which references `get_constant_schedule`.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1):
    """Constant LR after a linear warmup over `num_warmup_steps` steps.

    Name restored from the `TYPE_TO_SCHEDULER_FUNCTION` dispatch table below.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            # Linear ramp from 0 to the base LR during warmup.
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer, step_rules, last_epoch=-1):
    """Piecewise-constant LR multipliers parsed from a rule string.

    `step_rules` has the form "step1:mult1,step2:mult2,...,last_mult": while
    the current step is below `stepN`, `multN` scales the base LR; beyond the
    last boundary `last_mult` applies. (Restored `sorted_steps`, which the
    mangled source referenced without binding, and the function name from the
    dispatch table below.)
    """
    rules_dict = {}
    rule_list = step_rules.split(''',''')
    for rule_str in rule_list[:-1]:
        value_str, value = rule_str.split(''':''')
        steps = int(value_str)
        rules_dict[steps] = float(value)
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup to the base LR, then linear decay to 0 at `num_training_steps`.

    Name restored from the `TYPE_TO_SCHEDULER_FUNCTION` dispatch table below.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
    """Linear warmup, then cosine decay over the remaining steps.

    With the default `num_cycles=0.5` the LR traces half a cosine wave from
    the base LR down to 0. Name restored from the dispatch table below.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1
):
    """Linear warmup, then `num_cycles` cosine cycles with hard restarts.

    Name restored from the `TYPE_TO_SCHEDULER_FUNCTION` dispatch table below.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # `% 1.0` restarts the cosine at the top of each cycle.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup, then polynomial decay from the base LR down to `lr_end`.

    Name restored from the `TYPE_TO_SCHEDULER_FUNCTION` dispatch table below.

    Raises:
        ValueError: if `lr_end` is not smaller than the optimizer's base LR.
    """
    lr_init = optimizer.defaults['''lr''']
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""")

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Dispatch table mapping each SchedulerType member to its factory function;
# restored to the name `get_scheduler` reads (the mangled source bound it to
# a throwaway identifier, leaving that lookup a NameError).
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1.0 , lowerCAmelCase_ = -1 , )-> Optional[int]:
lowerCAmelCase_ : Union[str, Any] = SchedulerType(lowerCAmelCase_ )
lowerCAmelCase_ : Optional[int] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCAmelCase_ , step_rules=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , num_cycles=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , power=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ , )
return schedule_func(
lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ ) | 619 | 1 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : Optional[int] =logging.get_logger(__name__)
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> List[Any]:
lowerCAmelCase_ : Optional[Any] = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Tuple:
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
lowerCAmelCase_ : List[Any] = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
lowerCAmelCase_ : List[str] = in_proj_weight[
: encoder_config.hidden_size, :
]
lowerCAmelCase_ : Tuple = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
lowerCAmelCase_ : List[Any] = in_proj_weight[
-encoder_config.hidden_size :, :
]
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[Any]:
lowerCAmelCase_ : str = dct.pop(lowerCAmelCase_ )
lowerCAmelCase_ : Union[str, Any] = val
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
if "handwritten" in checkpoint_url:
lowerCAmelCase_ : Optional[Any] = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
lowerCAmelCase_ : Dict = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
lowerCAmelCase_ : int = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> str:
lowerCAmelCase_ : Any = ViTConfig(image_size=384 , qkv_bias=lowerCAmelCase_ )
lowerCAmelCase_ : Optional[Any] = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
lowerCAmelCase_ : Optional[Any] = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
lowerCAmelCase_ : Tuple = 1_024
lowerCAmelCase_ : Tuple = 4_096
lowerCAmelCase_ : Dict = 24
lowerCAmelCase_ : int = 16
lowerCAmelCase_ : List[str] = 1_024
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
lowerCAmelCase_ : int = False
lowerCAmelCase_ : List[str] = '''relu'''
lowerCAmelCase_ : Optional[int] = 1_024
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : int = False
lowerCAmelCase_ : str = False
# load HuggingFace model
lowerCAmelCase_ : Dict = ViTModel(lowerCAmelCase_ , add_pooling_layer=lowerCAmelCase_ )
lowerCAmelCase_ : str = TrOCRForCausalLM(lowerCAmelCase_ )
lowerCAmelCase_ : str = VisionEncoderDecoderModel(encoder=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
model.eval()
# load state_dict of original model, rename some keys
lowerCAmelCase_ : Optional[int] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location='''cpu''' , check_hash=lowerCAmelCase_ )['''model''']
lowerCAmelCase_ : str = create_rename_keys(lowerCAmelCase_ , lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
lowerCAmelCase_ : Tuple = state_dict.pop(lowerCAmelCase_ )
if key.startswith('''decoder''' ) and "output_projection" not in key:
lowerCAmelCase_ : Tuple = val
else:
lowerCAmelCase_ : List[Any] = val
# load state dict
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image
lowerCAmelCase_ : List[str] = ViTImageProcessor(size=encoder_config.image_size )
lowerCAmelCase_ : str = RobertaTokenizer.from_pretrained('''roberta-large''' )
lowerCAmelCase_ : Dict = TrOCRProcessor(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase_ : Union[str, Any] = processor(images=prepare_img(lowerCAmelCase_ ) , return_tensors='''pt''' ).pixel_values
# verify logits
lowerCAmelCase_ : Any = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
lowerCAmelCase_ : Tuple = model(pixel_values=lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_ )
lowerCAmelCase_ : Tuple = outputs.logits
lowerCAmelCase_ : Optional[Any] = torch.Size([1, 1, 50_265] )
if "trocr-base-handwritten" in checkpoint_url:
lowerCAmelCase_ : int = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
lowerCAmelCase_ : List[str] = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
lowerCAmelCase_ : Optional[int] = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
lowerCAmelCase_ : Optional[int] = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , lowerCAmelCase_ , atol=1e-3 ), "First elements of logits not as expected"
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase_ )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
_UpperCAmelCase : List[Any] =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
_UpperCAmelCase : Optional[Any] =parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 619 |
from __future__ import annotations
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )-> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 | 1 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_UpperCAmelCase : Dict ={"""UserAgent""": UserAgent().random}
def lowerCAmelCase ( lowerCAmelCase_ )-> dict:
lowerCAmelCase_ : Tuple = script.contents[0]
lowerCAmelCase_ : Optional[int] = json.loads(data[data.find('''{"config"''' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class snake_case__:
'''simple docstring'''
def __init__( self , __lowercase ) -> int:
lowerCAmelCase_ : Optional[Any] = f"""https://www.instagram.com/{username}/"""
lowerCAmelCase_ : List[Any] = self.get_json()
def lowercase_ ( self ) -> dict:
lowerCAmelCase_ : List[str] = requests.get(self.url , headers=__lowercase ).text
lowerCAmelCase_ : Dict = BeautifulSoup(__lowercase , '''html.parser''' ).find_all('''script''' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ) -> str:
return f"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ) -> str:
return f"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def lowercase_ ( self ) -> str:
return self.user_data["username"]
@property
def lowercase_ ( self ) -> str:
return self.user_data["full_name"]
@property
def lowercase_ ( self ) -> str:
return self.user_data["biography"]
@property
def lowercase_ ( self ) -> str:
return self.user_data["business_email"]
@property
def lowercase_ ( self ) -> str:
return self.user_data["external_url"]
@property
def lowercase_ ( self ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def lowercase_ ( self ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def lowercase_ ( self ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def lowercase_ ( self ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def lowercase_ ( self ) -> bool:
return self.user_data["is_verified"]
@property
def lowercase_ ( self ) -> bool:
return self.user_data["is_private"]
def lowerCAmelCase ( lowerCAmelCase_ = "github" )-> None:
import os
if os.environ.get('''CI''' ):
return # test failing on GitHub Actions
lowerCAmelCase_ : Tuple = InstagramUser(lowerCAmelCase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , lowerCAmelCase_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase : Optional[int] =InstagramUser("""github""")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""") | 619 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_UpperCAmelCase : Any ="""src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_UpperCAmelCase : Optional[Any] =direct_transformers_import(PATH_TO_TRANSFORMERS)
_UpperCAmelCase : List[str] =transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_UpperCAmelCase : Dict =re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
_UpperCAmelCase : Any ={
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
lowerCAmelCase_ : Any = None
# source code of `config_class`
lowerCAmelCase_ : Optional[int] = inspect.getsource(lowerCAmelCase_ )
lowerCAmelCase_ : str = _re_checkpoint.findall(lowerCAmelCase_ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
lowerCAmelCase_ : Optional[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
lowerCAmelCase_ : Tuple = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
lowerCAmelCase_ : List[str] = ckpt_name
break
return checkpoint
def lowerCAmelCase ( )-> Optional[Any]:
lowerCAmelCase_ : Tuple = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
lowerCAmelCase_ : int = get_checkpoint_from_config_class(lowerCAmelCase_ )
lowerCAmelCase_ : Tuple = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
lowerCAmelCase_ : List[Any] = '''\n'''.join(sorted(lowerCAmelCase_ ) )
raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints() | 619 | 1 |
def lowerCAmelCase ( )-> int:
return 1
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ = 200 )-> int:
return two_pound(lowerCAmelCase_ )
if __name__ == "__main__":
print(solution(int(input().strip()))) | 619 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class snake_case__:
'''simple docstring'''
def __init__( self , __lowercase , __lowercase , __lowercase = True , __lowercase = False ) -> Tuple:
lowerCAmelCase_ : Optional[int] = scheduler
lowerCAmelCase_ : Dict = optimizers if isinstance(__lowercase , (list, tuple) ) else [optimizers]
lowerCAmelCase_ : str = split_batches
lowerCAmelCase_ : Any = step_with_optimizer
lowerCAmelCase_ : Optional[Any] = GradientState()
def lowercase_ ( self , *__lowercase , **__lowercase ) -> Any:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*__lowercase , **__lowercase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*__lowercase , **__lowercase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
lowerCAmelCase_ : Optional[Any] = AcceleratorState().num_processes
for _ in range(__lowercase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*__lowercase , **__lowercase )
else:
self.scheduler.step(*__lowercase , **__lowercase )
def lowercase_ ( self ) -> Union[str, Any]:
return self.scheduler.get_last_lr()
def lowercase_ ( self ) -> List[str]:
return self.scheduler.state_dict()
def lowercase_ ( self , __lowercase ) -> int:
self.scheduler.load_state_dict(__lowercase )
def lowercase_ ( self ) -> Tuple:
return self.scheduler.get_lr()
def lowercase_ ( self , *__lowercase , **__lowercase ) -> int:
return self.scheduler.print_lr(*__lowercase , **__lowercase ) | 619 | 1 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
_UpperCAmelCase : Optional[int] =logging.getLogger(__name__)
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = """token-classification"""
def __init__( self , __lowercase ) -> Optional[int]:
if type(__lowercase ) == dict:
lowerCAmelCase_ : int = Namespace(**__lowercase )
lowerCAmelCase_ : str = import_module('''tasks''' )
try:
lowerCAmelCase_ : Any = getattr(__lowercase , hparams.task_type )
lowerCAmelCase_ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
lowerCAmelCase_ : str = self.token_classification_task.get_labels(hparams.labels )
lowerCAmelCase_ : int = CrossEntropyLoss().ignore_index
super().__init__(__lowercase , len(self.labels ) , self.mode )
def lowercase_ ( self , **__lowercase ) -> Any:
return self.model(**__lowercase )
def lowercase_ ( self , __lowercase , __lowercase ) -> int:
lowerCAmelCase_ : List[Any] = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
lowerCAmelCase_ : List[Any] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
lowerCAmelCase_ : Dict = self(**__lowercase )
lowerCAmelCase_ : int = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def lowercase_ ( self ) -> str:
lowerCAmelCase_ : List[str] = self.hparams
for mode in ["train", "dev", "test"]:
lowerCAmelCase_ : List[str] = self._feature_file(__lowercase )
if os.path.exists(__lowercase ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __lowercase )
lowerCAmelCase_ : Union[str, Any] = torch.load(__lowercase )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
lowerCAmelCase_ : Tuple = self.token_classification_task.read_examples_from_file(args.data_dir , __lowercase )
lowerCAmelCase_ : str = self.token_classification_task.convert_examples_to_features(
__lowercase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__lowercase , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , __lowercase )
torch.save(__lowercase , __lowercase )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase = False ) -> DataLoader:
lowerCAmelCase_ : List[Any] = self._feature_file(__lowercase )
logger.info('''Loading features from cached file %s''' , __lowercase )
lowerCAmelCase_ : List[str] = torch.load(__lowercase )
lowerCAmelCase_ : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowerCAmelCase_ : Tuple = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowerCAmelCase_ : Optional[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowerCAmelCase_ : Optional[int] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
lowerCAmelCase_ : Tuple = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__lowercase , __lowercase , __lowercase , __lowercase ) , batch_size=__lowercase )
def lowercase_ ( self , __lowercase , __lowercase ) -> Dict:
"""Compute validation""" ""
lowerCAmelCase_ : Tuple = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
lowerCAmelCase_ : Dict = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
lowerCAmelCase_ : str = self(**__lowercase )
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = outputs[:2]
lowerCAmelCase_ : int = logits.detach().cpu().numpy()
lowerCAmelCase_ : Any = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def lowercase_ ( self , __lowercase ) -> Union[str, Any]:
lowerCAmelCase_ : Tuple = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
lowerCAmelCase_ : List[Any] = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
lowerCAmelCase_ : Dict = np.argmax(__lowercase , axis=2 )
lowerCAmelCase_ : List[Any] = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
lowerCAmelCase_ : List[str] = dict(enumerate(self.labels ) )
lowerCAmelCase_ : List[str] = [[] for _ in range(out_label_ids.shape[0] )]
lowerCAmelCase_ : Dict = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowerCAmelCase_ : str = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(__lowercase , __lowercase ),
'''precision''': precision_score(__lowercase , __lowercase ),
'''recall''': recall_score(__lowercase , __lowercase ),
'''f1''': fa_score(__lowercase , __lowercase ),
}
lowerCAmelCase_ : List[str] = dict(results.items() )
lowerCAmelCase_ : Union[str, Any] = results
return ret, preds_list, out_label_list
def lowercase_ ( self , __lowercase ) -> List[str]:
# when stable
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = self._eval_end(__lowercase )
lowerCAmelCase_ : Union[str, Any] = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def lowercase_ ( self , __lowercase ) -> Optional[int]:
# updating to test_epoch_end instead of deprecated test_end
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Dict = self._eval_end(__lowercase )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowerCAmelCase_ : Tuple = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def lowercase_ ( __lowercase , __lowercase ) -> Optional[int]:
# Add NER specific options
BaseTransformer.add_model_specific_args(__lowercase , __lowercase )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=__lowercase , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=1_2_8 , type=__lowercase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=__lowercase , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__lowercase , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
_UpperCAmelCase : int =argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
_UpperCAmelCase : Dict =NERTransformer.add_model_specific_args(parser, os.getcwd())
_UpperCAmelCase : List[str] =parser.parse_args()
_UpperCAmelCase : List[Any] =NERTransformer(args)
_UpperCAmelCase : Optional[int] =generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
_UpperCAmelCase : Optional[int] =sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
_UpperCAmelCase : int =model.load_from_checkpoint(checkpoints[-1])
trainer.test(model) | 619 |
from manim import *
class snake_case__( UpperCAmelCase__ ):
    """Manim scene sketching how checkpoint weights are laid out across
    CPU, GPU and disk (an ``accelerate`` big-model-inference illustration).

    NOTE(review): obfuscation collapsed every local name to ``lowerCAmelCase_``
    and every helper argument to the undefined ``__lowercase``, while later
    lines still read the original names (``cpu``, ``gpu``, ``model``, ``fill``,
    ``cpu_left_col_base``, ``model_cpu_arr``, ``animations``, ...).  The scene
    cannot run in this form; the comments below describe the apparent intent.
    """

    def lowercase_ ( self ) -> Tuple:
        # Basic building blocks: a memory cell, a smaller "meta" cell, and a
        # fill rectangle used as a translucent overlay.
        lowerCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.25 , width=0.25 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU block: two 6-cell columns plus a "CPU" label.
        lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Tuple = Text('''CPU''' , font_size=2_4 )
        lowerCAmelCase_ : Union[str, Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__lowercase )
        # GPU block: four cells plus a "GPU" label.
        lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[Any] = Text('''GPU''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        gpu.move_to([-1, -1, 0] )
        self.add(__lowercase )
        # Model block: six cells plus a "Model" label.
        lowerCAmelCase_ : str = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Dict = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Dict = Text('''Model''' , font_size=2_4 )
        lowerCAmelCase_ : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        model.move_to([3, -1.0, 0] )
        self.add(__lowercase )
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : Dict = []
        # Place a translucent target next to each model cell.
        for i, rect in enumerate(__lowercase ):
            rect.set_stroke(__lowercase )
            lowerCAmelCase_ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=__lowercase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowercase , buff=0.0 )
            self.add(__lowercase )
            model_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase , *__lowercase )
        # Checkpoint block: six cells plus a "Loaded Checkpoint" label.
        lowerCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = Text('''Loaded Checkpoint''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(__lowercase )
        lowerCAmelCase_ : Optional[Any] = []
        lowerCAmelCase_ : Dict = []
        # Mirror each checkpoint cell onto the CPU columns (first five left,
        # the rest right).
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : str = fill.copy().set_fill(__lowercase , opacity=0.7 )
            target.move_to(__lowercase )
            ckpt_arr.append(__lowercase )
            lowerCAmelCase_ : Union[str, Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase )
        # Legend square and colour key.
        lowerCAmelCase_ : Union[str, Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCAmelCase_ : str = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__lowercase , __lowercase )
        lowerCAmelCase_ : str = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
        blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__lowercase )
        # Step 1 caption.
        lowerCAmelCase_ : str = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        # Disk block: two 6-cell columns of the smaller "meta" cells.
        lowerCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = Text('''Disk''' , font_size=2_4 )
        lowerCAmelCase_ : Optional[int] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(__lowercase , run_time=3 ) , Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) )
        lowerCAmelCase_ : int = []
        # Animate the checkpoint cells shrinking onto the left disk column.
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : int = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
        self.play(*__lowercase )
        self.play(FadeOut(__lowercase ) )
        # Step 2 caption, then fade the checkpoint-related mobjects out.
        lowerCAmelCase_ : Union[str, Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__lowercase , run_time=3 ) )
        self.play(
            FadeOut(__lowercase , __lowercase , *__lowercase , *__lowercase ) , )
        self.wait()
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers | 619 |
# Roman-numeral table: (value, symbol) pairs in descending value order,
# including the subtractive forms (CM, CD, XC, XL, IX, IV).  Consumed
# greedily by the int -> Roman converter below.
_UpperCAmelCase : Dict =[
    (1000, """M"""),
    (900, """CM"""),
    (500, """D"""),
    (400, """CD"""),
    (100, """C"""),
    (90, """XC"""),
    (50, """L"""),
    (40, """XL"""),
    (10, """X"""),
    (9, """IX"""),
    (5, """V"""),
    (4, """IV"""),
    (1, """I"""),
]
def lowerCAmelCase ( roman )-> int:
    """Convert a Roman numeral string to its integer value.

    Fix: the obfuscated version named the parameter ``lowerCAmelCase_`` while
    the body read the unbound names ``roman``, ``vals``, ``place`` and
    ``total`` (NameError on every call); the locals are rebound consistently.

    A subtractive pair (e.g. ``IV``) is detected when a symbol's value is
    smaller than its successor's; both symbols are consumed at once.
    """
    vals = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1_000}
    total = 0
    place = 0
    while place < len(roman ):
        if (place + 1 < len(roman )) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def lowerCAmelCase ( number )-> str:
    """Convert a non-negative integer to its Roman numeral string.

    Greedily consumes the module-level ``ROMAN`` (value, symbol) table in
    descending order.  Fix: the obfuscated version computed
    ``divmod(number, number)`` and appended the unbound name ``factor``
    (NameError); the intended ``factor, number = divmod(number, arabic)``
    is restored.
    """
    result = []
    for arabic, roman in ROMAN:
        # factor = how many times this symbol fits; number keeps the remainder
        factor, number = divmod(number , arabic )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(result )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
# Module-level logger for the HANS data utilities below.
# NOTE(review): later code reads `logger`, but obfuscation bound the logger
# to `_UpperCAmelCase`, so those references are unresolved as written.
_UpperCAmelCase : Any =logging.getLogger(__name__)
@dataclass(frozen=UpperCAmelCase__ )
class snake_case__:
    """A single HANS input example: two required strings and three optional ones.

    NOTE(review): all five field names were collapsed to
    ``SCREAMING_SNAKE_CASE__`` by obfuscation (presumably guid, text_a,
    text_b, label, pairID, judging from the processor below — confirm
    upstream), so only one field survives; ``frozen`` is likewise bound to
    the undefined ``UpperCAmelCase__``.
    """
    SCREAMING_SNAKE_CASE__ : str
    SCREAMING_SNAKE_CASE__ : str
    SCREAMING_SNAKE_CASE__ : Optional[str] = None
    SCREAMING_SNAKE_CASE__ : Optional[str] = None
    SCREAMING_SNAKE_CASE__ : Optional[str] = None
@dataclass(frozen=UpperCAmelCase__ )
class snake_case__:
    """Tokenized features for one HANS example.

    NOTE(review): field names were collapsed by obfuscation; from the usage
    in the dataset classes below they presumably were input_ids,
    attention_mask, token_type_ids, label and pairID — confirm upstream.
    ``frozen`` is bound to the undefined ``UpperCAmelCase__``.
    """
    SCREAMING_SNAKE_CASE__ : List[int]
    SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None
    SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None
    SCREAMING_SNAKE_CASE__ : Optional[Union[int, float]] = None
    SCREAMING_SNAKE_CASE__ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class snake_case__( UpperCAmelCase__ ):
    """PyTorch ``Dataset`` over HANS features, cached on disk per split.

    NOTE(review): the obfuscated ``__init__`` declares ``__lowercase`` several
    times (a SyntaxError) and the body reads names that are never bound
    (``task``, ``tokenizer``, ``evaluate``, ``overwrite_cache``,
    ``cached_features_file``, ``data_dir``, ``label_list``, ``self.features``).
    The code is kept byte-identical; it cannot run in this form.
    """
    # Cached list of tokenized features for the selected split.
    SCREAMING_SNAKE_CASE__ : List[InputFeatures]
    def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase = None , __lowercase=False , __lowercase = False , ) -> Tuple:
        lowerCAmelCase_ : Optional[int] = hans_processors[task]()
        # Cache file name encodes split, tokenizer class, max length and task.
        lowerCAmelCase_ : str = os.path.join(
            __lowercase , '''cached_{}_{}_{}_{}'''.format(
                '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(__lowercase ) , __lowercase , ) , )
        lowerCAmelCase_ : Any = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = label_list[2], label_list[1]
        lowerCAmelCase_ : Union[str, Any] = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lowerCAmelCase_ : Dict = cached_features_file + '''.lock'''
        with FileLock(__lowercase ):
            if os.path.exists(__lowercase ) and not overwrite_cache:
                logger.info(f"""Loading features from cached file {cached_features_file}""" )
                lowerCAmelCase_ : Optional[int] = torch.load(__lowercase )
            else:
                logger.info(f"""Creating features from dataset file at {data_dir}""" )
                lowerCAmelCase_ : Optional[int] = (
                    processor.get_dev_examples(__lowercase ) if evaluate else processor.get_train_examples(__lowercase )
                )
                logger.info('''Training examples: %s''' , len(__lowercase ) )
                lowerCAmelCase_ : Any = hans_convert_examples_to_features(__lowercase , __lowercase , __lowercase , __lowercase )
                logger.info('''Saving features into cached file %s''' , __lowercase )
                torch.save(self.features , __lowercase )
    def __len__( self ) -> List[str]:
        # Number of cached feature records.
        return len(self.features )
    def __getitem__( self , __lowercase ) -> InputFeatures:
        # NOTE(review): reads the unbound name `i` instead of the parameter.
        return self.features[i]
    def lowercase_ ( self ) -> List[str]:
        # Accessor for the (possibly swapped) label list.
        return self.label_list
if is_tf_available():
import tensorflow as tf
class snake_case__:
    """TensorFlow variant of the HANS dataset: wraps the converted features
    in a ``tf.data.Dataset`` built from a python generator.

    NOTE(review): the obfuscated ``__init__`` declares ``__lowercase``
    several times (a SyntaxError) and reads unbound names (``task``,
    ``tokenizer``, ``evaluate``, ``self.features``, ``self.dataset``), and
    the dtype names ``tf.intaa`` (originally ``tf.int32``/``tf.int64``,
    presumably) are corrupted.  Kept byte-identical; it cannot run as-is.
    """
    # Converted feature records backing the tf.data pipeline.
    SCREAMING_SNAKE_CASE__ : List[InputFeatures]
    def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase = 1_2_8 , __lowercase=False , __lowercase = False , ) -> Optional[Any]:
        lowerCAmelCase_ : Dict = hans_processors[task]()
        lowerCAmelCase_ : int = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            lowerCAmelCase_ , lowerCAmelCase_ : Tuple = label_list[2], label_list[1]
        lowerCAmelCase_ : Dict = label_list
        lowerCAmelCase_ : List[str] = processor.get_dev_examples(__lowercase ) if evaluate else processor.get_train_examples(__lowercase )
        lowerCAmelCase_ : str = hans_convert_examples_to_features(__lowercase , __lowercase , __lowercase , __lowercase )
        # Generator yielding (features dict, label) pairs for from_generator.
        def gen():
            for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
                if ex_index % 1_0_0_0_0 == 0:
                    logger.info('''Writing example %d of %d''' % (ex_index, len(__lowercase )) )
                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )
        lowerCAmelCase_ : Tuple = tf.data.Dataset.from_generator(
            __lowercase , (
                {
                    '''example_id''': tf.intaa,
                    '''input_ids''': tf.intaa,
                    '''attention_mask''': tf.intaa,
                    '''token_type_ids''': tf.intaa,
                },
                tf.intaa,
            ) , (
                {
                    '''example_id''': tf.TensorShape([] ),
                    '''input_ids''': tf.TensorShape([None, None] ),
                    '''attention_mask''': tf.TensorShape([None, None] ),
                    '''token_type_ids''': tf.TensorShape([None, None] ),
                },
                tf.TensorShape([] ),
            ) , )
    def lowercase_ ( self ) -> Optional[int]:
        # Accessor for the built tf.data.Dataset.
        return self.dataset
    def __len__( self ) -> str:
        return len(self.features )
    def __getitem__( self , __lowercase ) -> InputFeatures:
        # NOTE(review): reads the unbound name `i` instead of the parameter.
        return self.features[i]
    def lowercase_ ( self ) -> Any:
        return self.label_list
class snake_case__( UpperCAmelCase__ ):
    """Processor for the HANS dataset (train/dev TSV files -> InputExamples).

    NOTE(review): obfuscation gave all four methods the same name
    ``lowercase_`` (later defs override earlier ones) and the last method
    reads the unbound names ``set_type``/``examples`` and appends to them —
    kept byte-identical; this class cannot work as written.
    """
    def lowercase_ ( self , __lowercase ) -> Optional[Any]:
        # Load the training split from `heuristics_train_set.txt`.
        return self._create_examples(self._read_tsv(os.path.join(__lowercase , '''heuristics_train_set.txt''' ) ) , '''train''' )
    def lowercase_ ( self , __lowercase ) -> Any:
        # Load the evaluation split from `heuristics_evaluation_set.txt`.
        return self._create_examples(self._read_tsv(os.path.join(__lowercase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
    def lowercase_ ( self ) -> int:
        # The three NLI labels used by HANS.
        return ["contradiction", "entailment", "neutral"]
    def lowercase_ ( self , __lowercase , __lowercase ) -> List[str]:
        # Build InputExamples from TSV rows; row 0 is the header.
        # NOTE(review): duplicate `__lowercase` parameters are a SyntaxError.
        lowerCAmelCase_ : Union[str, Any] = []
        for i, line in enumerate(__lowercase ):
            if i == 0:
                continue
            lowerCAmelCase_ : Tuple = '''%s-%s''' % (set_type, line[0])
            lowerCAmelCase_ : List[str] = line[5]
            lowerCAmelCase_ : List[Any] = line[6]
            lowerCAmelCase_ : int = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
            lowerCAmelCase_ : Any = line[0]
            examples.append(InputExample(guid=__lowercase , text_a=__lowercase , text_b=__lowercase , label=__lowercase , pairID=__lowercase ) )
        return examples
def lowerCAmelCase ( examples , label_list , max_length , tokenizer , )-> Any:
    """Tokenize HANS ``InputExample``s into ``InputFeatures``.

    Fixes: the obfuscated signature declared ``lowerCAmelCase_`` four times
    (a SyntaxError) and the tokenizer kwargs were corrupted
    (``add_special_tokens=lowerCAmelCase_`` etc.); parameters are renamed to
    the names the body actually uses and the boolean kwargs restored.

    examples:   iterable of InputExample (text_a / text_b / label / pairID)
    label_list: ordered label names; mapped to indices
    max_length: padding/truncation length passed to the tokenizer
    tokenizer:  a callable HF tokenizer
    Returns the list of InputFeatures.
    """
    lowerCAmelCase_ : Optional[Any] = {label: i for i, label in enumerate(label_list )}
    label_map = lowerCAmelCase_
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc='''convert examples to features''' ):
        if ex_index % 10_000 == 0:
            logger.info('''Writing example %d''' % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding='''max_length''' , truncation=True , return_overflowing_tokens=True , )
        # Unknown labels fall back to index 0 (HANS only has entailment /
        # non-entailment at evaluation time).
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
    # Log a handful of examples for debugging.
    for i, example in enumerate(examples[:5] ):
        logger.info('''*** Example ***''' )
        logger.info(f"""guid: {example}""" )
        logger.info(f"""features: {features[i]}""" )
    return features
# Number of output classes per registered task.
_UpperCAmelCase : List[Any] ={
    """hans""": 3,
}
# Task name -> processor class.
# NOTE(review): `HansProcessor` is not defined anywhere in this file (the
# processor class above was renamed by obfuscation), so this lookup would
# raise NameError at import time.
_UpperCAmelCase : str ={
    """hans""": HansProcessor,
}
import csv
import tweepy
# Twitter API credentials
# NOTE(review): all four credentials are bound to the same obfuscated name
# `_UpperCAmelCase`, so only the last assignment survives; the downloader
# below presumably expected distinct consumer/access key and secret names.
_UpperCAmelCase : int =""""""
_UpperCAmelCase : Optional[int] =""""""
_UpperCAmelCase : Dict =""""""
_UpperCAmelCase : str =""""""
def lowerCAmelCase ( lowerCAmelCase_ )-> None:
    """Download a user's recent tweets (paged via max_id) and dump them to
    ``new_<screen_name>_tweets.csv``.

    NOTE(review): obfuscation passes the single parameter everywhere —
    including into ``OAuthHandler``/``set_access_token``, which should take
    the four credential constants — and later lines read unbound names
    (``api``, ``alltweets``, ``oldest``, ``screen_name``).  Kept
    byte-identical; cannot run as written.  Pure network I/O, no return value.
    """
    # authorize twitter, initialize tweepy
    lowerCAmelCase_ : Optional[int] = tweepy.OAuthHandler(lowerCAmelCase_ , lowerCAmelCase_ )
    auth.set_access_token(lowerCAmelCase_ , lowerCAmelCase_ )
    lowerCAmelCase_ : Any = tweepy.API(lowerCAmelCase_ )
    # initialize a list to hold all the tweepy Tweets
    lowerCAmelCase_ : Dict = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    lowerCAmelCase_ : Optional[int] = api.user_timeline(screen_name=lowerCAmelCase_ , count=200 )
    # save most recent tweets
    alltweets.extend(lowerCAmelCase_ )
    # save the id of the oldest tweet less one
    lowerCAmelCase_ : str = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(lowerCAmelCase_ ) > 0:
        print(f"""getting tweets before {oldest}""" )
        # all subsequent requests use the max_id param to prevent duplicates
        lowerCAmelCase_ : Optional[Any] = api.user_timeline(
            screen_name=lowerCAmelCase_ , count=200 , max_id=lowerCAmelCase_ )
        # save most recent tweets
        alltweets.extend(lowerCAmelCase_ )
        # update the id of the oldest tweet less one
        lowerCAmelCase_ : Optional[Any] = alltweets[-1].id - 1
        print(f"""...{len(lowerCAmelCase_ )} tweets downloaded so far""" )
    # transform the tweepy tweets into a 2D array that will populate the csv
    lowerCAmelCase_ : Union[str, Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"""new_{screen_name}_tweets.csv""" , '''w''' ) as f:
        lowerCAmelCase_ : Optional[int] = csv.writer(lowerCAmelCase_ )
        writer.writerow(['''id''', '''created_at''', '''text'''] )
        writer.writerows(lowerCAmelCase_ )
if __name__ == "__main__":
    # pass in the username of the account you want to download
    # NOTE(review): `get_all_tweets` is undefined here — the downloader above
    # was renamed to `lowerCAmelCase` by obfuscation.
    get_all_tweets("""FirePing32""")
def lowerCAmelCase ( number )-> bool:
    """Return True when ``number`` has at most one set bit.

    Uses the classic ``n & (n - 1)`` trick.  Note it deliberately returns
    True for 0 as well (matching the original's arithmetic); negative input
    raises ValueError.  Fix: the obfuscated version named its parameter
    ``lowerCAmelCase_`` while the body read the unbound ``number``.
    """
    if number < 0:
        raise ValueError('''number must not be negative''' )
    return number & (number - 1) == 0
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
from math import sqrt
def lowerCAmelCase ( number )-> bool:
    """Return True if ``number`` is prime, by trial division up to sqrt.

    Fix: obfuscation turned the input check into ``isinstance(x, x)``
    (TypeError on every call) and left the body reading the unbound name
    ``number``; both are restored.
    """
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2 , int(round(sqrt(number ) ) ) + 1 ):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status , bool ), "'status' must been from type bool"
    return status
def lowerCAmelCase ( n )-> list:
    """Return all primes from 2 up to ``n`` (sieve of Eratosthenes).

    Fix: restores the ``isinstance(n, int)`` check and the local names
    (``begin_list``, marking multiples as 0) that obfuscation collapsed.
    """
    assert isinstance(n , int ) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2 , n + 1 ) )
    ans = []  # this list will be returns.
    # actual sieve of erathostenes: zero out every multiple of a survivor
    for i in range(len(begin_list ) ):
        for j in range(i + 1 , len(begin_list ) ):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def lowerCAmelCase ( n )-> list:
    """Return the primes from 2 to ``n`` inclusive, testing each with the
    module's primality check.

    Fix: restores ``isinstance(n, int)`` and the unbound loop variable that
    obfuscation collapsed.  Relies on the sibling ``is_prime``-style helper
    (named ``lowerCAmelCase`` after obfuscation) being in module scope.
    """
    assert isinstance(n , int ) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # if a number is prime then appends to list 'ans'
    for number in range(2 , n + 1 ):
        if is_prime(number ):
            ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def lowerCAmelCase ( number )-> list:
    """Return the prime factorization of ``number`` as a list of factors.

    0 and 1 are returned as themselves.  Fix: restores ``isinstance`` checks
    and the unbound ``factor``/``quotient`` bookkeeping collapsed by
    obfuscation.  Requires the sibling ``is_prime`` helper.
    """
    assert isinstance(number , int ) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number )
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number ):
        while quotient != 1:
            if is_prime(factor ) and (quotient % factor == 0):
                ans.append(factor )
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def lowerCAmelCase ( number )-> int:
    """Return the largest prime factor of ``number``.

    Fix: restores the corrupted ``isinstance`` checks.  Requires the sibling
    prime-factorization helper.
    """
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(number )
    ans = max(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"
    return ans
def lowerCAmelCase ( number )-> int:
    """Return the smallest prime factor of ``number``.

    Fix: restores the corrupted ``isinstance`` checks.  Requires the sibling
    prime-factorization helper.
    """
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(number )
    ans = min(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"
    return ans
def lowerCAmelCase ( number )-> bool:
    """Return True when ``number`` is even.

    Fix: restores ``isinstance(number, int)`` / ``isinstance(..., bool)``
    from the corrupted ``isinstance(x, x)`` forms.
    """
    assert isinstance(number , int ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , bool ), "compare bust been from type bool"
    return number % 2 == 0
def lowerCAmelCase ( number )-> bool:
    """Return True when ``number`` is odd.

    Fix: restores ``isinstance(number, int)`` / ``isinstance(..., bool)``
    from the corrupted ``isinstance(x, x)`` forms.
    """
    assert isinstance(number , int ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , bool ), "compare bust been from type bool"
    return number % 2 != 0
def lowerCAmelCase ( number )-> list:
    """Return two primes whose sum is the even ``number`` (Goldbach pair).

    Fix: restores the loop indices ``i``/``j`` and the ``loop`` flag that
    obfuscation collapsed.  Requires the sibling ``get_prime_numbers`` /
    ``is_even`` / ``is_prime`` helpers.
    """
    assert (
        isinstance(number , int ) and (number > 2) and is_even(number )
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number )
    len_pn = len(prime_numbers )
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i] )
                ans.append(prime_numbers[j] )
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans , list )
        and (len(ans ) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0] )
        and is_prime(ans[1] )
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def lowerCAmelCase ( number1 , number2 )-> int:
    """Return the greatest common divisor of two non-negative ints
    (iterative Euclid).

    Fix: the obfuscated signature declared ``lowerCAmelCase_`` twice (a
    SyntaxError) and the loop names were collapsed; the parameters are
    renamed to match the original assertion messages.
    """
    assert (
        isinstance(number1 , int )
        and isinstance(number2 , int )
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1 , int ) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
def lowerCAmelCase ( number1 , number2 )-> int:
    """Return the least common multiple of two positive ints, built from the
    union of their prime factorizations (max multiplicity per prime).

    Fix: the obfuscated signature declared the same parameter twice (a
    SyntaxError) and local names were collapsed; restored per the assertion
    messages and the surviving body references.  Requires the sibling
    prime-factorization helper.
    """
    assert (
        isinstance(number1 , int )
        and isinstance(number2 , int )
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(number1 )
        prime_fac_b = prime_factorization(number2 )
    elif number1 == 1 or number2 == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(number1 , number2 )
    count_a = 0
    count_b = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                count_a = prime_fac_a.count(n )
                count_b = prime_fac_b.count(n )
                for _ in range(max(count_a , count_b ) ):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n )
                for _ in range(count_a ):
                    ans *= n
            done.append(n )
    # iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n )
            for _ in range(count_b ):
                ans *= n
            done.append(n )
    # precondition
    assert isinstance(ans , int ) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def lowerCAmelCase ( n )-> int:
    """Return the ``n``-th prime, counting from index 0 (so n=0 -> 2).

    Fix: restores the corrupted ``isinstance`` checks.  Requires the sibling
    ``is_prime`` helper.
    """
    assert isinstance(n , int ) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans ):
            ans += 1
    # precondition
    assert isinstance(ans , int ) and is_prime(
        ans ), "'ans' must been a prime number and from type int"
    return ans
def lowerCAmelCase ( p_number_a , p_number_b )-> list:
    """Return the primes strictly between the two prime bounds.

    Fix: the obfuscated signature declared ``lowerCAmelCase_`` twice (a
    SyntaxError) and the inner "advance to next prime" loops lost their
    nesting; restored.  Requires the sibling ``is_prime`` helper.
    """
    assert (
        is_prime(p_number_a ) and is_prime(p_number_b ) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number ):
        number += 1
    while number < p_number_b:
        ans.append(number )
        number += 1
        # fetch the next prime number.
        while not is_prime(number ):
            number += 1
    # precondition
    assert (
        isinstance(ans , list )
        and ans[0] != p_number_a
        and ans[len(ans ) - 1] != p_number_b
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def lowerCAmelCase ( n )-> list:
    """Return all divisors of ``n`` in ascending order (including 1 and n).

    Fix: restores the corrupted ``isinstance`` check and the unbound
    ``divisor`` append target.
    """
    assert isinstance(n , int ) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1 , n + 1 ):
        if n % divisor == 0:
            ans.append(divisor )
    # precondition
    assert ans[0] == 1 and ans[len(ans ) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def lowerCAmelCase ( number )-> bool:
    """Return True when ``number`` equals the sum of its proper divisors.

    Fix: restores the corrupted ``isinstance`` checks.  Requires the sibling
    divisor-listing helper.
    """
    assert isinstance(number , int ) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number )
    # precondition
    assert (
        isinstance(divisors , list )
        and (divisors[0] == 1)
        and (divisors[len(divisors ) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1] ) == number
def lowerCAmelCase ( numerator , denominator )-> tuple:
    """Return ``(numerator, denominator)`` reduced by their gcd.

    Fix: the obfuscated signature declared the same parameter twice (a
    SyntaxError); parameters restored per the surviving body references.
    Requires the sibling ``gcd`` helper.
    """
    assert (
        isinstance(numerator , int )
        and isinstance(denominator , int )
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator ) , abs(denominator ) )
    # precondition
    assert (
        isinstance(gcd_of_fraction , int )
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCAmelCase ( n )-> int:
    """Return n! for n >= 0 (0! == 1).

    Fix: restores the corrupted ``isinstance`` check.
    """
    assert isinstance(n , int ) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1 , n + 1 ):
        ans *= factor
    return ans
def lowerCAmelCase ( n )-> int:
    """Return the ``n``-th Fibonacci number with fib(0) == fib(1) == 1.

    Fix: restores the corrupted ``isinstance`` check and the collapsed local
    names (``tmp``, ``fib1``, ``ans``) of the iterative update.
    """
    assert isinstance(n , int ) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1 ):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
from collections.abc import Sequence
from queue import Queue
class snake_case__:
    """A segment-tree node covering the inclusive index range [start, end].

    Fix: the obfuscated ``__init__`` declared ``__lowercase`` five times (a
    SyntaxError) and assigned every attribute to a throwaway local, leaving
    ``self.start``/``self.end``/... unset although ``__repr__`` and the tree
    code below read them.  Attributes restored from those usages.
    """

    def __init__( self , start , end , val , left=None , right=None ) -> None:
        self.start = start          # left edge of the covered range
        self.end = end              # right edge of the covered range
        self.val = val              # combined value over the range
        self.mid = (start + end) // 2  # split point used by queries/updates
        self.left = left            # child covering [start, mid]
        self.right = right          # child covering [mid + 1, end]

    def __repr__( self ) -> str:
        return f"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"""
class snake_case__:
    """Segment tree over ``collection`` combining values with ``function``
    (point update, inclusive range query, breadth-first traversal).

    Fix: the obfuscated ``__init__`` declared ``__lowercase`` twice (a
    SyntaxError), every method was named ``lowercase_`` while call sites used
    ``update``/``query_range``/``_build_tree``/``_update_tree``/
    ``_query_range``/``traverse``/``self.fn``/``self.root``, and attribute
    assignments went to throwaway locals.  All names restored from those call
    sites.  NOTE(review): node construction still references
    ``SegmentTreeNode``, which this obfuscated file never defines (the node
    class above was renamed) — confirm the class names upstream.
    """

    def __init__( self , collection , function ) -> None:
        self.collection = collection  # backing sequence of leaf values
        self.fn = function            # associative combiner, e.g. operator.add
        if self.collection:
            self.root = self._build_tree(0 , len(collection ) - 1 )

    def update( self , i , val ) -> None:
        """Set position ``i`` to ``val`` and refresh ancestor values."""
        self._update_tree(self.root , i , val )

    def query_range( self , i , j ):
        """Combine the values over the inclusive index range [i, j]."""
        return self._query_range(self.root , i , j )

    def _build_tree( self , start , end ):
        # Recursively build the subtree covering [start, end].
        if start == end:
            return SegmentTreeNode(start , end , self.collection[start] )
        mid = (start + end) // 2
        left = self._build_tree(start , mid )
        right = self._build_tree(mid + 1 , end )
        return SegmentTreeNode(start , end , self.fn(left.val , right.val ) , left , right )

    def _update_tree( self , node , i , val ) -> None:
        # Descend to the leaf for index i, then recombine on the way up.
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left , i , val )
        else:
            self._update_tree(node.right , i , val )
        node.val = self.fn(node.left.val , node.right.val )

    def _query_range( self , node , i , j ):
        # Exact cover: answer is stored on this node.
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left , i , j )
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left , i , node.mid ) , self._query_range(node.right , node.mid + 1 , j ) , )
        else:
            # range in right child tree
            return self._query_range(node.right , i , j )

    def traverse( self ):
        """Yield the nodes in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root )
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left )
                if node.right is not None:
                    queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 50)
_UpperCAmelCase : Any =SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print() | 619 |
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search of ``target`` in ``array[left:right]``; returns index or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a *sorted* ``array``; returns index or -1."""
    left = 0
    right = len(array)
    while left <= right:
        # Once the window is small, finish with a plain linear scan.
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            # target lies strictly between the two probes
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over a *sorted* ``array``; returns index or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    # Keep both results distinct; the obfuscated original overwrote one with
    # the other, so the iterative result was never reported.
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_rec != -1:
        print(f"Iterative search: {target} found at positions: {result_ite}")
        print(f"Recursive search: {target} found at positions: {result_rec}")
    else:
        print("Not found")
import argparse
import os
import re
import packaging.version
# The functions below reference these names (PATH_TO_EXAMPLES, REPLACE_PATTERNS,
# REPLACE_FILES, README_FILE); the obfuscated module assigned them all to a
# single throwaway name, leaving every reference undefined.
PATH_TO_EXAMPLES = "examples/"

# pattern name -> (compiled regex that locates the version, replacement template)
# "VERSION" in the template is substituted with the actual version string.
REPLACE_PATTERNS = {
    "examples": (
        re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE),
        'check_min_version("VERSION")\n',
    ),
    "init": (
        re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE),
        '__version__ = "VERSION"\n',
    ),
    "setup": (
        re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE),
        r'\1version="VERSION",',
    ),
    "doc": (
        re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE),
        'release = "VERSION"\n',
    ),
}

# pattern name -> file holding the version string for that pattern
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}

README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Rewrite the version string inside ``fname``.

    Args:
        fname: path of the file to edit in place.
        version: the new version string to write.
        pattern: key into the module-level ``REPLACE_PATTERNS`` table selecting
            the regex/replacement pair to apply.
    """
    # Restored the real name (callers below use `update_version_in_file`)
    # and the destroyed tuple-unpack / locals.
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the pinned version in every example script under ``PATH_TO_EXAMPLES``.

    Args:
        version: the new version string to write into each ``.py`` example.
    """
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded in the repository.

    Args:
        version: the new version string.
        patch: when True this is a patch release, so the example scripts
            (which pin only minor versions) are left untouched.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links to the `main` docs with stable-doc links in the README model list."""
    # Restored the destroyed locals (_start_prompt/_end_prompt/lines/start_index/index)
    # and the README_FILE constant reference; the obfuscated body read from an
    # undefined name.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current version from the package ``__init__`` and parse it.

    Returns:
        A ``packaging.version.Version`` for the version currently in the repo.
    """
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the release preparation: compute the new version and write it everywhere.

    Args:
        patch: when True, bump the micro version instead of the minor version.

    Raises:
        ValueError: if a patch release is attempted from a dev-version branch.
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the post-release work: bump to the next ``.dev0`` version everywhere."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # CLI entry point: --post_release bumps to the next dev version,
    # --patch performs a patch release, default is a pre-release minor bump.
    # Restored the destroyed `parser`/`args` locals (the obfuscated guard
    # assigned both to a throwaway name and then referenced them undefined).
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import machinery: `_import_structure` maps submodule name -> exported
# names, filled in conditionally depending on which optional backends are
# installed. The obfuscated module never defined `_import_structure` (every
# assignment went to a throwaway name), so the _LazyModule call at the bottom
# referenced an undefined variable; the dict and its conditional updates are
# restored here.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # At runtime the module object is replaced by a lazy proxy so heavy
    # backends are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
_UpperCAmelCase : Dict =logging.get_logger(__name__)
_UpperCAmelCase : Optional[int] ={
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class LayoutLMv3Config(PretrainedConfig):
    """Configuration for a LayoutLMv3-style model.

    Stores the text/layout/vision hyper-parameters; unknown keyword arguments
    are forwarded to ``PretrainedConfig``.
    NOTE(review): the obfuscated original declared every parameter as a
    duplicate ``__lowercase`` (a SyntaxError), used an undefined base class,
    and was shadowed by the next class definition; names are reconstructed
    from the attribute assignments in the body.
    """

    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_ad_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_ad_pos_bins=64,
        max_rel_ad_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        # Layout (2D position) and relative-attention hyper-parameters.
        self.max_ad_position_embeddings = max_ad_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_ad_pos_bins = rel_ad_pos_bins
        self.max_rel_ad_pos = max_rel_ad_pos
        # Modality switches and vision-branch hyper-parameters.
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    """ONNX export configuration for the LayoutLMv3-style model above.

    NOTE(review): in the obfuscated original all four members were named
    ``lowercase_`` (shadowing each other) and ``generate_dummy_inputs``
    declared duplicate parameters (a SyntaxError); the standard OnnxConfig
    override names are restored.
    """

    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework=None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        """Build dummy (text, bbox, image) inputs for ONNX export tracing."""
        # A dummy image is used, so OCR must not run on it.
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
        return inputs
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_UpperCAmelCase : Any =False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    """Fast CPU tests for VQDiffusionPipeline built from tiny dummy components.

    NOTE(review): renamed from a duplicated obfuscated class name so both test
    classes in this module are discoverable; method/property names are restored
    from the ``self.<name>`` references in the bodies.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = TransformeraDModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        # Thresholds kept exactly as in the original assertions.
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the published microsoft/vq-diffusion-ithq checkpoint.

    NOTE(review): renamed from a duplicated obfuscated class name so it no
    longer shadows the fast-test class above.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Backtracking N-queens solver.

    Args:
        possible_board: queen column chosen for each row placed so far.
        diagonal_right_collisions: occupied ``row - col`` diagonals.
        diagonal_left_collisions: occupied ``row + col`` diagonals.
        boards: output accumulator; each complete solution is appended as a
            list of printable row strings.
        n: board size.
    """
    lowerCAmelCase_ = len(possible_board)  # unused placeholder removed below
    row = len(possible_board)
    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there are not other same value because if there is it means
        # that there are a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify if the results of this two formulas not exist in their variables
        # respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any or these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    """Solve the N-queens puzzle for an ``n`` x ``n`` board and print every solution."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
    # Run the module doctests, then demo the solver on a 4x4 board.
    import doctest
    doctest.testmod()
    n_queens_solution(4)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
_UpperCAmelCase : Dict =None
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
# The tokenizer class below references these names; the obfuscated module
# assigned them all to a single throwaway name, leaving every reference
# undefined. Restored to the conventional constant names.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# SentencePiece word-boundary marker.
SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) XLNet tokenizer.

    NOTE(review): the obfuscated original declared every ``__init__`` parameter
    as a duplicate ``__lowercase`` (a SyntaxError), used an undefined base
    class, and gave every method the same name; signature and member names are
    reconstructed from the bodies and the imported slow-tokenizer counterpart.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None) -> List[int]:
        """Append XLNet special tokens: ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_b + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None) -> List[int]:
        """Build segment ids: sequence A -> 0, sequence B -> 1, trailing <cls> -> 2."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_b is None:
            return len(token_ids_a + sep) * [0] + cls_segment_id
        return len(token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the SentencePiece model file into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : List[str] =logging.get_logger(__name__)
_UpperCAmelCase : List[str] ={
"""tiiuae/falcon-40b""": """https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json""",
"""tiiuae/falcon-7b""": """https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json""",
}
class FalconConfig(PretrainedConfig):
    """Configuration for a Falcon-style causal language model.

    NOTE(review): the obfuscated original declared every parameter as a
    duplicate ``__lowercase`` (a SyntaxError), assigned both class attributes
    to the same name, and used an undefined base class; names are
    reconstructed from the attribute assignments in the body.
    """

    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        # Per-head width of the attention projections.
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        # Rotary embeddings are used exactly when ALiBi is disabled.
        return not self.alibi
import math
import qiskit
def lowerCAmelCase ( lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 )-> qiskit.result.counts.Counts:
if (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
or isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
or isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(lowerCAmelCase_ ) != input_a)
or (math.floor(lowerCAmelCase_ ) != input_a)
or (math.floor(lowerCAmelCase_ ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('''inputs must be less or equal to 2.''' )
# build registers
lowerCAmelCase_ : str = qiskit.QuantumRegister(4 , '''qr''' )
lowerCAmelCase_ : str = qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
lowerCAmelCase_ : Any = [input_a, input_a, carry_in]
lowerCAmelCase_ : int = qiskit.QuantumCircuit(lowerCAmelCase_ , lowerCAmelCase_ )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(lowerCAmelCase_ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(lowerCAmelCase_ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(lowerCAmelCase_ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , lowerCAmelCase_ ) # measure the last two qbits
lowerCAmelCase_ : Tuple = qiskit.Aer.get_backend('''aer_simulator''' )
lowerCAmelCase_ : Union[str, Any] = qiskit.execute(lowerCAmelCase_ , lowerCAmelCase_ , shots=1_000 )
return job.result().get_counts(lowerCAmelCase_ )
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""") | 619 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_UpperCAmelCase : Tuple ={
"""configuration_encodec""": [
"""ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EncodecConfig""",
],
"""feature_extraction_encodec""": ["""EncodecFeatureExtractor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] =[
"""ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EncodecModel""",
"""EncodecPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : List[str] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 619 |
import re
def lowerCAmelCase ( lowerCAmelCase_ )-> bool:
lowerCAmelCase_ : Tuple = re.compile(r'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
if match := re.search(lowerCAmelCase_ , lowerCAmelCase_ ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator("""+918827897895""")) | 619 | 1 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
_UpperCAmelCase : List[Any] =logging.getLogger(__name__)
if __name__ == "__main__":
_UpperCAmelCase : Dict =argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=3_0522, type=int)
_UpperCAmelCase : str =parser.parse_args()
logger.info(f"""Loading data from {args.data_file}""")
with open(args.data_file, """rb""") as fp:
_UpperCAmelCase : str =pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
_UpperCAmelCase : List[Any] =Counter()
for tk_ids in data:
counter.update(tk_ids)
_UpperCAmelCase : Tuple =[0] * args.vocab_size
for k, v in counter.items():
_UpperCAmelCase : Union[str, Any] =v
logger.info(f"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL) | 619 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_UpperCAmelCase : Any =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ["""pixel_values"""]
def __init__( self , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = 1 / 2_5_5 , __lowercase = True , __lowercase = None , __lowercase = None , **__lowercase , ) -> None:
super().__init__(**__lowercase )
lowerCAmelCase_ : Dict = size if size is not None else {'''shortest_edge''': 3_8_4}
lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
lowerCAmelCase_ : List[Any] = do_resize
lowerCAmelCase_ : Optional[int] = size
# Default value set here for backwards compatibility where the value in config is None
lowerCAmelCase_ : str = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
lowerCAmelCase_ : Tuple = resample
lowerCAmelCase_ : Optional[int] = do_rescale
lowerCAmelCase_ : Any = rescale_factor
lowerCAmelCase_ : List[str] = do_normalize
lowerCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = PILImageResampling.BICUBIC , __lowercase = None , **__lowercase , ) -> np.ndarray:
lowerCAmelCase_ : Optional[Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
if "shortest_edge" not in size:
raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
lowerCAmelCase_ : Optional[int] = size['''shortest_edge''']
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCAmelCase_ : Optional[Any] = int(shortest_edge / crop_pct )
lowerCAmelCase_ : List[str] = get_resize_output_image_size(__lowercase , size=__lowercase , default_to_square=__lowercase )
lowerCAmelCase_ : List[Any] = resize(image=__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__lowercase , size=(shortest_edge, shortest_edge) , data_format=__lowercase , **__lowercase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__lowercase , size=(shortest_edge, shortest_edge) , resample=__lowercase , data_format=__lowercase , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> Any:
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray:
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> PIL.Image.Image:
lowerCAmelCase_ : Optional[int] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ : Any = crop_pct if crop_pct is not None else self.crop_pct
lowerCAmelCase_ : str = resample if resample is not None else self.resample
lowerCAmelCase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ : str = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ : int = image_std if image_std is not None else self.image_std
lowerCAmelCase_ : int = size if size is not None else self.size
lowerCAmelCase_ : List[str] = get_size_dict(__lowercase , default_to_square=__lowercase )
lowerCAmelCase_ : Tuple = make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError('''crop_pct must be specified if size < 384.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowerCAmelCase_ : Optional[Any] = [to_numpy_array(__lowercase ) for image in images]
if do_resize:
lowerCAmelCase_ : Union[str, Any] = [self.resize(image=__lowercase , size=__lowercase , crop_pct=__lowercase , resample=__lowercase ) for image in images]
if do_rescale:
lowerCAmelCase_ : Any = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
if do_normalize:
lowerCAmelCase_ : List[Any] = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
lowerCAmelCase_ : Optional[Any] = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
lowerCAmelCase_ : Dict = {'''pixel_values''': images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase ) | 619 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : Optional[int] ={
"""configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict =[
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : str =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 619 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Optional[int] =logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] ={
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = """gpt_neox_japanese"""
def __init__( self , __lowercase=3_2_0_0_0 , __lowercase=2_5_6_0 , __lowercase=3_2 , __lowercase=3_2 , __lowercase=4 , __lowercase="gelu" , __lowercase=1.00 , __lowercase=1_0_0_0_0 , __lowercase=2_0_4_8 , __lowercase=0.02 , __lowercase=1e-5 , __lowercase=True , __lowercase=3_1_9_9_6 , __lowercase=3_1_9_9_9 , __lowercase=0.1 , __lowercase=0.0 , **__lowercase , ) -> str:
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
lowerCAmelCase_ : Optional[Any] = vocab_size
lowerCAmelCase_ : Tuple = max_position_embeddings
lowerCAmelCase_ : Optional[Any] = hidden_size
lowerCAmelCase_ : Optional[Any] = num_hidden_layers
lowerCAmelCase_ : str = num_attention_heads
lowerCAmelCase_ : str = intermediate_multiple_size
lowerCAmelCase_ : str = hidden_act
lowerCAmelCase_ : Dict = rotary_pct
lowerCAmelCase_ : Union[str, Any] = rotary_emb_base
lowerCAmelCase_ : int = initializer_range
lowerCAmelCase_ : Any = layer_norm_eps
lowerCAmelCase_ : Optional[Any] = use_cache
lowerCAmelCase_ : Tuple = attention_dropout
lowerCAmelCase_ : Dict = hidden_dropout | 619 | 1 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
_UpperCAmelCase : Dict ="""src/transformers"""
_UpperCAmelCase : int ="""docs/source/en/tasks"""
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Dict:
with open(lowerCAmelCase_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCAmelCase_ : Optional[int] = f.readlines()
# Find the start prompt.
lowerCAmelCase_ : List[str] = 0
while not lines[start_index].startswith(lowerCAmelCase_ ):
start_index += 1
start_index += 1
lowerCAmelCase_ : str = start_index
while not lines[end_index].startswith(lowerCAmelCase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
_UpperCAmelCase : Dict =direct_transformers_import(TRANSFORMERS_PATH)
_UpperCAmelCase : int ={
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
_UpperCAmelCase : Optional[int] ={
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def lowerCAmelCase ( lowerCAmelCase_ )-> Any:
lowerCAmelCase_ : Optional[int] = TASK_GUIDE_TO_MODELS[task_guide]
lowerCAmelCase_ : Tuple = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(lowerCAmelCase_ , set() )
lowerCAmelCase_ : Dict = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_=False )-> Dict:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Any = _find_text_in_file(
filename=os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , )
lowerCAmelCase_ : Optional[int] = get_model_list_for_task(lowerCAmelCase_ )
if current_list != new_list:
if overwrite:
with open(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
''' to fix this.''' )
if __name__ == "__main__":
_UpperCAmelCase : Optional[Any] =argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_UpperCAmelCase : Union[str, Any] =parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite) | 619 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    """Builds tiny MaskFormer configs and random inputs, and runs shape/consistency
    checks on the model outputs for the common test suite.

    NOTE(review): the obfuscated original used duplicate ``__lowercase`` parameter
    names (a SyntaxError) and assigned constructor arguments to throwaway locals;
    names are restored to match the callers in the test class below
    (``MaskFormerModelTester(self)``, ``prepare_config_and_inputs`` …).
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        # `parent` is the unittest.TestCase driving the checks (supplies assert* methods).
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, pixel_mask, mask_labels, class_labels) random test tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        # Binary masks / labels drawn at random; .float()/.long() match the model's expected dtypes.
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        """Build a tiny MaskFormer config (Swin backbone + DETR decoder) sized by this tester."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) restricted to the inputs the common tests use."""
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Wires MaskFormer into the common ModelTesterMixin / PipelineTesterMixin suites.

    NOTE(review): the obfuscated original inherited from undefined ``UpperCAmelCase__``
    bases and re-assigned a single ``SCREAMING_SNAKE_CASE__`` attribute eight times;
    the mixin attribute names and unittest ``test_*`` method names are restored here.
    """

    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    # Flags consumed by the common mixin; the corresponding generic tests do not
    # apply to MaskFormer, so they are disabled (overridden variants live below).
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
_UpperCAmelCase : Dict =1E-4
def prepare_img():
    """Load the standard COCO fixture image used by the integration tests below.

    NOTE(review): restored from an obfuscated version that returned an undefined
    name; the integration tests call ``prepare_img()``, so that name is used here.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class snake_case__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase_ ( self ) -> Union[str, Any]:
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def lowercase_ ( self ) -> Any:
lowerCAmelCase_ : Optional[Any] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowercase )
lowerCAmelCase_ : Dict = self.default_image_processor
lowerCAmelCase_ : int = prepare_img()
lowerCAmelCase_ : Any = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
lowerCAmelCase_ : Any = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
lowerCAmelCase_ : List[str] = model(**__lowercase )
lowerCAmelCase_ : Union[str, Any] = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
lowerCAmelCase_ : List[Any] = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
lowerCAmelCase_ : int = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowercase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) )
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowercase )
.eval()
)
lowerCAmelCase_ : Tuple = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : int = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
lowerCAmelCase_ : Tuple = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
lowerCAmelCase_ : Dict = model(**__lowercase )
# masks_queries_logits
lowerCAmelCase_ : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCAmelCase_ : Tuple = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
lowerCAmelCase_ : int = torch.tensor(__lowercase ).to(__lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
# class_queries_logits
lowerCAmelCase_ : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase_ : Dict = torch.tensor(
[
[1.6_512e00, -5.2_572e00, -3.3_519e00],
[3.6_169e-02, -5.9_025e00, -2.9_313e00],
[1.0_766e-04, -7.7_630e00, -5.1_263e00],
] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : str = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(__lowercase )
.eval()
)
lowerCAmelCase_ : int = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : Dict = image_processor(__lowercase , return_tensors='''pt''' ).to(__lowercase )
lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(__lowercase , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
lowerCAmelCase_ : str = model(**__lowercase )
# masks_queries_logits
lowerCAmelCase_ : List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCAmelCase_ : Any = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
lowerCAmelCase_ : str = torch.tensor(__lowercase ).to(__lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
# class_queries_logits
lowerCAmelCase_ : Optional[int] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase_ : int = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__lowercase )
.eval()
)
lowerCAmelCase_ : str = self.default_image_processor
lowerCAmelCase_ : Union[str, Any] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
lowerCAmelCase_ : Optional[Any] = inputs['''pixel_values'''].to(__lowercase )
lowerCAmelCase_ : int = [el.to(__lowercase ) for el in inputs['''mask_labels''']]
lowerCAmelCase_ : Optional[Any] = [el.to(__lowercase ) for el in inputs['''class_labels''']]
with torch.no_grad():
lowerCAmelCase_ : str = model(**__lowercase )
self.assertTrue(outputs.loss is not None ) | 619 | 1 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = """linear"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """cosine"""
SCREAMING_SNAKE_CASE__ : Dict = """cosine_with_restarts"""
SCREAMING_SNAKE_CASE__ : List[str] = """polynomial"""
SCREAMING_SNAKE_CASE__ : Dict = """constant"""
SCREAMING_SNAKE_CASE__ : List[str] = """constant_with_warmup"""
SCREAMING_SNAKE_CASE__ : str = """piecewise_constant"""
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ = -1 )-> Tuple:
return LambdaLR(lowerCAmelCase_ , lambda lowerCAmelCase_ : 1 , last_epoch=lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = -1 )-> List[Any]:
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1.0 , lowerCAmelCase_ ) )
return 1.0
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = -1 )-> int:
lowerCAmelCase_ : Optional[int] = {}
lowerCAmelCase_ : Union[str, Any] = step_rules.split(''',''' )
for rule_str in rule_list[:-1]:
lowerCAmelCase_ , lowerCAmelCase_ : str = rule_str.split(''':''' )
lowerCAmelCase_ : int = int(lowerCAmelCase_ )
lowerCAmelCase_ : str = float(lowerCAmelCase_ )
lowerCAmelCase_ : List[Any] = value
lowerCAmelCase_ : int = float(rule_list[-1] )
def create_rules_function(lowerCAmelCase_ , lowerCAmelCase_ ):
def rule_func(lowerCAmelCase_ ) -> float:
lowerCAmelCase_ : Tuple = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(lowerCAmelCase_ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
lowerCAmelCase_ : Tuple = create_rules_function(lowerCAmelCase_ , lowerCAmelCase_ )
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=-1 )-> Optional[int]:
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0.5 , lowerCAmelCase_ = -1 )-> List[Any]:
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) )
lowerCAmelCase_ : Tuple = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCAmelCase_ ) * 2.0 * progress )) )
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = -1 )-> Dict:
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) )
lowerCAmelCase_ : List[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCAmelCase_ ) * progress) % 1.0) )) )
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1e-7 , lowerCAmelCase_=1.0 , lowerCAmelCase_=-1 )-> Any:
lowerCAmelCase_ : Dict = optimizer.defaults['''lr''']
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lowerCAmelCase_ : List[Any] = lr_init - lr_end
lowerCAmelCase_ : Optional[Any] = num_training_steps - num_warmup_steps
lowerCAmelCase_ : Any = 1 - (current_step - num_warmup_steps) / decay_steps
lowerCAmelCase_ : List[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] ={
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1.0 , lowerCAmelCase_ = -1 , )-> Optional[int]:
lowerCAmelCase_ : Union[str, Any] = SchedulerType(lowerCAmelCase_ )
lowerCAmelCase_ : Optional[int] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCAmelCase_ , step_rules=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , num_cycles=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , power=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ , )
return schedule_func(
lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ ) | 619 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase__ )
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = field(default="""automatic-speech-recognition""", metadata={"""include_in_asdict_even_if_is_default""": True} )
SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""audio""": Audio()} )
SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""transcription""": Value("""string""" )} )
SCREAMING_SNAKE_CASE__ : str = "audio"
SCREAMING_SNAKE_CASE__ : str = "transcription"
def lowercase_ ( self , __lowercase ) -> int:
if self.audio_column not in features:
raise ValueError(f"""Column {self.audio_column} is not present in features.""" )
if not isinstance(features[self.audio_column] , __lowercase ):
raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" )
lowerCAmelCase_ : List[str] = copy.deepcopy(self )
lowerCAmelCase_ : Optional[Any] = self.input_schema.copy()
lowerCAmelCase_ : Optional[Any] = features[self.audio_column]
lowerCAmelCase_ : List[str] = input_schema
return task_template
@property
def lowercase_ ( self ) -> Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"} | 619 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : Optional[int] ={
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Any =["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int =[
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : str =[
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] =[
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : List[Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 619 |
_UpperCAmelCase : int =frozenset(
[
"""prompt""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
_UpperCAmelCase : List[Any] =frozenset(["""prompt""", """negative_prompt"""])
_UpperCAmelCase : Dict =frozenset([])
_UpperCAmelCase : int =frozenset(["""image"""])
_UpperCAmelCase : Tuple =frozenset(
[
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
_UpperCAmelCase : int =frozenset(["""image"""])
_UpperCAmelCase : str =frozenset(
[
"""prompt""",
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
_UpperCAmelCase : int =frozenset(["""prompt""", """image""", """negative_prompt"""])
_UpperCAmelCase : Optional[int] =frozenset(
[
# Text guided image variation with an image mask
"""prompt""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
_UpperCAmelCase : Optional[int] =frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
_UpperCAmelCase : Optional[Any] =frozenset(
[
# image variation with an image mask
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
_UpperCAmelCase : Optional[Any] =frozenset(["""image""", """mask_image"""])
_UpperCAmelCase : Union[str, Any] =frozenset(
[
"""example_image""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
_UpperCAmelCase : Tuple =frozenset(["""example_image""", """image""", """mask_image"""])
_UpperCAmelCase : Any =frozenset(["""class_labels"""])
_UpperCAmelCase : List[Any] =frozenset(["""class_labels"""])
_UpperCAmelCase : int =frozenset(["""batch_size"""])
_UpperCAmelCase : str =frozenset([])
_UpperCAmelCase : str =frozenset(["""batch_size"""])
_UpperCAmelCase : Optional[Any] =frozenset([])
_UpperCAmelCase : Tuple =frozenset(
[
"""prompt""",
"""audio_length_in_s""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
_UpperCAmelCase : Tuple =frozenset(["""prompt""", """negative_prompt"""])
_UpperCAmelCase : List[str] =frozenset(["""input_tokens"""])
_UpperCAmelCase : Optional[Any] =frozenset(["""input_tokens"""]) | 619 | 1 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , )-> Tuple:
if config_name_or_path is None:
lowerCAmelCase_ : str = '''facebook/rag-token-base''' if model_type == '''rag_token''' else '''facebook/rag-sequence-base'''
if generator_tokenizer_name_or_path is None:
lowerCAmelCase_ : Optional[int] = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
lowerCAmelCase_ : List[Any] = question_encoder_name_or_path
lowerCAmelCase_ : Dict = RagTokenForGeneration if model_type == '''rag_token''' else RagSequenceForGeneration
# Save model.
lowerCAmelCase_ : str = RagConfig.from_pretrained(lowerCAmelCase_ )
lowerCAmelCase_ : List[Any] = AutoConfig.from_pretrained(lowerCAmelCase_ )
lowerCAmelCase_ : Dict = AutoConfig.from_pretrained(lowerCAmelCase_ )
lowerCAmelCase_ : Optional[Any] = gen_config
lowerCAmelCase_ : Any = question_encoder_config
lowerCAmelCase_ : List[Any] = model_class.from_pretrained_question_encoder_generator(
lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ )
rag_model.save_pretrained(lowerCAmelCase_ )
# Sanity check.
model_class.from_pretrained(lowerCAmelCase_ )
# Save tokenizers.
lowerCAmelCase_ : int = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
gen_tokenizer.save_pretrained(dest_dir / '''generator_tokenizer/''' )
lowerCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
question_encoder_tokenizer.save_pretrained(dest_dir / '''question_encoder_tokenizer/''' )
if __name__ == "__main__":
_UpperCAmelCase : List[Any] =argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
_UpperCAmelCase : Optional[int] =parser.parse_args()
_UpperCAmelCase : Any =Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
) | 619 |
def lowerCAmelCase ( lowerCAmelCase_ = 1_000_000 )-> int:
lowerCAmelCase_ : Dict = 1
lowerCAmelCase_ : List[Any] = 1
lowerCAmelCase_ : Optional[Any] = {1: 1}
for inputa in range(2 , lowerCAmelCase_ ):
lowerCAmelCase_ : Tuple = 0
lowerCAmelCase_ : Dict = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
lowerCAmelCase_ : Any = (3 * number) + 1
counter += 1
if inputa not in counters:
lowerCAmelCase_ : Tuple = counter
if counter > pre_counter:
lowerCAmelCase_ : Optional[int] = inputa
lowerCAmelCase_ : Union[str, Any] = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip()))) | 619 | 1 |
import numpy as np
def lowerCAmelCase ( lowerCAmelCase_ )-> np.array:
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : str =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = """encoder-decoder"""
SCREAMING_SNAKE_CASE__ : str = True
def __init__( self , **__lowercase ) -> Union[str, Any]:
super().__init__(**__lowercase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
lowerCAmelCase_ : str = kwargs.pop('''encoder''' )
lowerCAmelCase_ : int = encoder_config.pop('''model_type''' )
lowerCAmelCase_ : Optional[Any] = kwargs.pop('''decoder''' )
lowerCAmelCase_ : Optional[Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
lowerCAmelCase_ : Union[str, Any] = AutoConfig.for_model(__lowercase , **__lowercase )
lowerCAmelCase_ : List[str] = AutoConfig.for_model(__lowercase , **__lowercase )
lowerCAmelCase_ : Any = True
@classmethod
def lowercase_ ( cls , __lowercase , __lowercase , **__lowercase ) -> PretrainedConfig:
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
lowerCAmelCase_ : int = True
lowerCAmelCase_ : List[Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowercase )
def lowercase_ ( self ) -> Any:
lowerCAmelCase_ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ : List[str] = self.encoder.to_dict()
lowerCAmelCase_ : Dict = self.decoder.to_dict()
lowerCAmelCase_ : Optional[Any] = self.__class__.model_type
return output | 619 | 1 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Tuple:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , lowerCAmelCase_ )
lowerCAmelCase_ : Optional[int] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
lowerCAmelCase_ : Optional[Any] = dataset_size < in_memory_max_size
else:
lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : Optional[int] = is_small_dataset(lowerCAmelCase_ )
assert result == expected | 619 |
from __future__ import annotations
from math import pi
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 | 1 |
import argparse
import os
import re
_UpperCAmelCase : Optional[Any] ="""src/diffusers"""
# Pattern that looks at the indentation in a line.
_UpperCAmelCase : Tuple =re.compile(R"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
_UpperCAmelCase : Dict =re.compile(R"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_UpperCAmelCase : Dict =re.compile(R"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_UpperCAmelCase : Optional[Any] =re.compile(R"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_UpperCAmelCase : Tuple =re.compile(R"""\[([^\]]+)\]""")
def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]:
lowerCAmelCase_ : List[Any] = _re_indent.search(lowerCAmelCase_ )
return "" if search is None else search.groups()[0]
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_="" , lowerCAmelCase_=None , lowerCAmelCase_=None )-> str:
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : str = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowerCAmelCase_ ):
index += 1
lowerCAmelCase_ : List[str] = ['''\n'''.join(lines[:index] )]
else:
lowerCAmelCase_ : Optional[Any] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase_ : str = [lines[index]]
index += 1
while index < len(lowerCAmelCase_ ) and (end_prompt is None or not lines[index].startswith(lowerCAmelCase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCAmelCase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowerCAmelCase_ ) )
if index < len(lowerCAmelCase_ ) - 1:
lowerCAmelCase_ : int = [lines[index + 1]]
index += 1
else:
lowerCAmelCase_ : Tuple = []
else:
blocks.append('''\n'''.join(lowerCAmelCase_ ) )
lowerCAmelCase_ : str = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCAmelCase_ ) > 0:
blocks.append('''\n'''.join(lowerCAmelCase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCAmelCase_ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def lowerCAmelCase ( lowerCAmelCase_ )-> Optional[int]:
def _inner(lowerCAmelCase_ ):
return key(lowerCAmelCase_ ).lower().replace('''_''' , '''''' )
return _inner
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_=None )-> Dict:
# If no key is provided, we use a noop.
def noop(lowerCAmelCase_ ):
return x
if key is None:
lowerCAmelCase_ : Dict = noop
# Constants are all uppercase, they go first.
lowerCAmelCase_ : Union[str, Any] = [obj for obj in objects if key(lowerCAmelCase_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCAmelCase_ : int = [obj for obj in objects if key(lowerCAmelCase_ )[0].isupper() and not key(lowerCAmelCase_ ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCAmelCase_ : Dict = [obj for obj in objects if not key(lowerCAmelCase_ )[0].isupper()]
lowerCAmelCase_ : Optional[int] = ignore_underscore(lowerCAmelCase_ )
return sorted(lowerCAmelCase_ , key=lowerCAmelCase_ ) + sorted(lowerCAmelCase_ , key=lowerCAmelCase_ ) + sorted(lowerCAmelCase_ , key=lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ )-> Tuple:
# This inner function sort imports between [ ].
def _replace(lowerCAmelCase_ ):
lowerCAmelCase_ : Union[str, Any] = match.groups()[0]
if "," not in imports:
return f"""[{imports}]"""
lowerCAmelCase_ : Any = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase_ : Optional[Any] = keys[:-1]
return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(lowerCAmelCase_ )] ) + "]"
lowerCAmelCase_ : str = import_statement.split('''\n''' )
if len(lowerCAmelCase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCAmelCase_ : List[str] = 2 if lines[1].strip() == '''[''' else 1
lowerCAmelCase_ : List[Any] = [(i, _re_strip_line.search(lowerCAmelCase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCAmelCase_ : Tuple = sort_objects(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x[1] )
lowerCAmelCase_ : Optional[Any] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCAmelCase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCAmelCase_ : int = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCAmelCase_ : List[Any] = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase_ : Optional[Any] = keys[:-1]
lowerCAmelCase_ : str = get_indent(lines[1] ) + ''', '''.join([f"""\"{k}\"""" for k in sort_objects(lowerCAmelCase_ )] )
return "\n".join(lowerCAmelCase_ )
else:
# Finally we have to deal with imports fitting on one line
lowerCAmelCase_ : Optional[Any] = _re_bracket_content.sub(_replace , lowerCAmelCase_ )
return import_statement
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_=True )-> Optional[int]:
with open(lowerCAmelCase_ , '''r''' ) as f:
lowerCAmelCase_ : int = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCAmelCase_ : Dict = split_code_in_indented_blocks(
lowerCAmelCase_ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCAmelCase_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCAmelCase_ : List[str] = main_blocks[block_idx]
lowerCAmelCase_ : Any = block.split('''\n''' )
# Get to the start of the imports.
lowerCAmelCase_ : Optional[Any] = 0
while line_idx < len(lowerCAmelCase_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCAmelCase_ : Union[str, Any] = len(lowerCAmelCase_ )
else:
line_idx += 1
if line_idx >= len(lowerCAmelCase_ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCAmelCase_ : int = '''\n'''.join(block_lines[line_idx:-1] )
lowerCAmelCase_ : int = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
lowerCAmelCase_ : str = split_code_in_indented_blocks(lowerCAmelCase_ , indent_level=lowerCAmelCase_ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCAmelCase_ : Any = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCAmelCase_ : Tuple = [(pattern.search(lowerCAmelCase_ ).groups()[0] if pattern.search(lowerCAmelCase_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCAmelCase_ : str = [(i, key) for i, key in enumerate(lowerCAmelCase_ ) if key is not None]
lowerCAmelCase_ : Tuple = [x[0] for x in sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : List[str] = []
for i in range(len(lowerCAmelCase_ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
lowerCAmelCase_ : Optional[int] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(lowerCAmelCase_ )
count += 1
# And we put our main block back together with its first and last line.
lowerCAmelCase_ : List[str] = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCAmelCase_ ):
if check_only:
return True
else:
print(f"""Overwriting {file}.""" )
with open(lowerCAmelCase_ , '''w''' ) as f:
f.write('''\n'''.join(lowerCAmelCase_ ) )
def lowerCAmelCase ( lowerCAmelCase_=True )-> Optional[int]:
lowerCAmelCase_ : str = []
for root, _, files in os.walk(lowerCAmelCase_ ):
if "__init__.py" in files:
lowerCAmelCase_ : Tuple = sort_imports(os.path.join(lowerCAmelCase_ , '''__init__.py''' ) , check_only=lowerCAmelCase_ )
if result:
lowerCAmelCase_ : str = [os.path.join(lowerCAmelCase_ , '''__init__.py''' )]
if len(lowerCAmelCase_ ) > 0:
raise ValueError(f"""Would overwrite {len(lowerCAmelCase_ )} files, run `make style`.""" )
if __name__ == "__main__":
_UpperCAmelCase : Dict =argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
_UpperCAmelCase : Optional[int] =parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only) | 619 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = """linear"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """cosine"""
SCREAMING_SNAKE_CASE__ : Dict = """cosine_with_restarts"""
SCREAMING_SNAKE_CASE__ : List[str] = """polynomial"""
SCREAMING_SNAKE_CASE__ : Dict = """constant"""
SCREAMING_SNAKE_CASE__ : List[str] = """constant_with_warmup"""
SCREAMING_SNAKE_CASE__ : str = """piecewise_constant"""
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ = -1 )-> Tuple:
return LambdaLR(lowerCAmelCase_ , lambda lowerCAmelCase_ : 1 , last_epoch=lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = -1 )-> List[Any]:
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1.0 , lowerCAmelCase_ ) )
return 1.0
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = -1 )-> int:
lowerCAmelCase_ : Optional[int] = {}
lowerCAmelCase_ : Union[str, Any] = step_rules.split(''',''' )
for rule_str in rule_list[:-1]:
lowerCAmelCase_ , lowerCAmelCase_ : str = rule_str.split(''':''' )
lowerCAmelCase_ : int = int(lowerCAmelCase_ )
lowerCAmelCase_ : str = float(lowerCAmelCase_ )
lowerCAmelCase_ : List[Any] = value
lowerCAmelCase_ : int = float(rule_list[-1] )
def create_rules_function(lowerCAmelCase_ , lowerCAmelCase_ ):
def rule_func(lowerCAmelCase_ ) -> float:
lowerCAmelCase_ : Tuple = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(lowerCAmelCase_ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
lowerCAmelCase_ : Tuple = create_rules_function(lowerCAmelCase_ , lowerCAmelCase_ )
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=-1 )-> Optional[int]:
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0.5 , lowerCAmelCase_ = -1 )-> List[Any]:
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) )
lowerCAmelCase_ : Tuple = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCAmelCase_ ) * 2.0 * progress )) )
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = -1 )-> Dict:
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) )
lowerCAmelCase_ : List[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCAmelCase_ ) * progress) % 1.0) )) )
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1e-7 , lowerCAmelCase_=1.0 , lowerCAmelCase_=-1 )-> Any:
lowerCAmelCase_ : Dict = optimizer.defaults['''lr''']
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(lowerCAmelCase_ ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase_ ) / float(max(1 , lowerCAmelCase_ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lowerCAmelCase_ : List[Any] = lr_init - lr_end
lowerCAmelCase_ : Optional[Any] = num_training_steps - num_warmup_steps
lowerCAmelCase_ : Any = 1 - (current_step - num_warmup_steps) / decay_steps
lowerCAmelCase_ : List[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] ={
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1.0 , lowerCAmelCase_ = -1 , )-> Optional[int]:
lowerCAmelCase_ : Union[str, Any] = SchedulerType(lowerCAmelCase_ )
lowerCAmelCase_ : Optional[int] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCAmelCase_ , step_rules=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , num_cycles=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , power=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ , )
return schedule_func(
lowerCAmelCase_ , num_warmup_steps=lowerCAmelCase_ , num_training_steps=lowerCAmelCase_ , last_epoch=lowerCAmelCase_ ) | 619 | 1 |
import argparse
import json
from tqdm import tqdm
def lowerCAmelCase ( )-> Dict:
lowerCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--src_path''' , type=lowerCAmelCase_ , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
parser.add_argument(
'''--evaluation_set''' , type=lowerCAmelCase_ , help='''where to store parsed evaluation_set file''' , )
parser.add_argument(
'''--gold_data_path''' , type=lowerCAmelCase_ , help='''where to store parsed gold_data_path file''' , )
lowerCAmelCase_ : List[str] = parser.parse_args()
with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
args.gold_data_path , '''w''' ) as gold_file:
lowerCAmelCase_ : List[str] = json.load(lowerCAmelCase_ )
for dpr_record in tqdm(lowerCAmelCase_ ):
lowerCAmelCase_ : Tuple = dpr_record['''question''']
lowerCAmelCase_ : Union[str, Any] = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
eval_file.write(question + '''\n''' )
gold_file.write('''\t'''.join(lowerCAmelCase_ ) + '''\n''' )
if __name__ == "__main__":
main() | 619 |
from __future__ import annotations
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )-> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Optional[Any] =logging.get_logger(__name__)
_UpperCAmelCase : Tuple ={
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = """lilt"""
def __init__( self , __lowercase=3_0_5_2_2 , __lowercase=7_6_8 , __lowercase=1_2 , __lowercase=1_2 , __lowercase=3_0_7_2 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_1_2 , __lowercase=2 , __lowercase=0.02 , __lowercase=1e-12 , __lowercase=0 , __lowercase="absolute" , __lowercase=None , __lowercase=4 , __lowercase=1_0_2_4 , **__lowercase , ) -> Optional[Any]:
super().__init__(pad_token_id=__lowercase , **__lowercase )
lowerCAmelCase_ : Optional[Any] = vocab_size
lowerCAmelCase_ : Any = hidden_size
lowerCAmelCase_ : int = num_hidden_layers
lowerCAmelCase_ : Union[str, Any] = num_attention_heads
lowerCAmelCase_ : Tuple = hidden_act
lowerCAmelCase_ : Any = intermediate_size
lowerCAmelCase_ : List[str] = hidden_dropout_prob
lowerCAmelCase_ : Tuple = attention_probs_dropout_prob
lowerCAmelCase_ : Optional[Any] = max_position_embeddings
lowerCAmelCase_ : Optional[int] = type_vocab_size
lowerCAmelCase_ : Union[str, Any] = initializer_range
lowerCAmelCase_ : Optional[Any] = layer_norm_eps
lowerCAmelCase_ : Optional[int] = position_embedding_type
lowerCAmelCase_ : int = classifier_dropout
lowerCAmelCase_ : List[Any] = channel_shrink_ratio
lowerCAmelCase_ : Optional[Any] = max_ad_position_embeddings | 619 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_UpperCAmelCase : Any ="""src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_UpperCAmelCase : Optional[Any] =direct_transformers_import(PATH_TO_TRANSFORMERS)
_UpperCAmelCase : List[str] =transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_UpperCAmelCase : Dict =re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
_UpperCAmelCase : Any ={
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
lowerCAmelCase_ : Any = None
# source code of `config_class`
lowerCAmelCase_ : Optional[int] = inspect.getsource(lowerCAmelCase_ )
lowerCAmelCase_ : str = _re_checkpoint.findall(lowerCAmelCase_ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
lowerCAmelCase_ : Optional[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
lowerCAmelCase_ : Tuple = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
lowerCAmelCase_ : List[str] = ckpt_name
break
return checkpoint
def lowerCAmelCase ( )-> Optional[Any]:
lowerCAmelCase_ : Tuple = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
lowerCAmelCase_ : int = get_checkpoint_from_config_class(lowerCAmelCase_ )
lowerCAmelCase_ : Tuple = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
lowerCAmelCase_ : List[Any] = '''\n'''.join(sorted(lowerCAmelCase_ ) )
raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints() | 619 | 1 |
# Algorithm for the pigeonhole sorting
def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]:
lowerCAmelCase_ : List[Any] = min(lowerCAmelCase_ ) # min() finds the minimum value
lowerCAmelCase_ : Optional[int] = max(lowerCAmelCase_ ) # max() finds the maximum value
lowerCAmelCase_ : List[str] = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
lowerCAmelCase_ : Tuple = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
lowerCAmelCase_ : int = 0
for count in range(lowerCAmelCase_ ):
while holes[count] > 0:
holes[count] -= 1
lowerCAmelCase_ : List[str] = count + min_val
i += 1
def lowerCAmelCase ( )-> List[str]:
lowerCAmelCase_ : Tuple = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(lowerCAmelCase_ )
print('''Sorted order is:''' , ''' '''.join(lowerCAmelCase_ ) )
if __name__ == "__main__":
main() | 619 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class snake_case__:
'''simple docstring'''
def __init__( self , __lowercase , __lowercase , __lowercase = True , __lowercase = False ) -> Tuple:
lowerCAmelCase_ : Optional[int] = scheduler
lowerCAmelCase_ : Dict = optimizers if isinstance(__lowercase , (list, tuple) ) else [optimizers]
lowerCAmelCase_ : str = split_batches
lowerCAmelCase_ : Any = step_with_optimizer
lowerCAmelCase_ : Optional[Any] = GradientState()
def lowercase_ ( self , *__lowercase , **__lowercase ) -> Any:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*__lowercase , **__lowercase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*__lowercase , **__lowercase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
lowerCAmelCase_ : Optional[Any] = AcceleratorState().num_processes
for _ in range(__lowercase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*__lowercase , **__lowercase )
else:
self.scheduler.step(*__lowercase , **__lowercase )
def lowercase_ ( self ) -> Union[str, Any]:
return self.scheduler.get_last_lr()
def lowercase_ ( self ) -> List[str]:
return self.scheduler.state_dict()
def lowercase_ ( self , __lowercase ) -> int:
self.scheduler.load_state_dict(__lowercase )
def lowercase_ ( self ) -> Tuple:
return self.scheduler.get_lr()
def lowercase_ ( self , *__lowercase , **__lowercase ) -> int:
return self.scheduler.print_lr(*__lowercase , **__lowercase ) | 619 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase : List[Any] =get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = ReformerTokenizer
SCREAMING_SNAKE_CASE__ : List[str] = ReformerTokenizerFast
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
SCREAMING_SNAKE_CASE__ : List[Any] = False
SCREAMING_SNAKE_CASE__ : List[str] = True
def lowercase_ ( self ) -> Optional[Any]:
super().setUp()
lowerCAmelCase_ : List[str] = ReformerTokenizer(__lowercase , keep_accents=__lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self ) -> Tuple:
lowerCAmelCase_ : Any = '''<s>'''
lowerCAmelCase_ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(__lowercase ) , 1_0_0_0 )
def lowercase_ ( self ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def lowercase_ ( self ) -> Any:
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ : str = self.get_tokenizer()
lowerCAmelCase_ : List[str] = self.get_rust_tokenizer()
lowerCAmelCase_ : Dict = '''I was born in 92000, and this is falsé.'''
lowerCAmelCase_ : Dict = tokenizer.tokenize(__lowercase )
lowerCAmelCase_ : Tuple = rust_tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
lowerCAmelCase_ : str = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
lowerCAmelCase_ : Dict = rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
lowerCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
lowerCAmelCase_ : Optional[int] = tokenizer.encode(__lowercase )
lowerCAmelCase_ : Optional[int] = rust_tokenizer.encode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def lowercase_ ( self , __lowercase=1_5 ) -> Optional[int]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase_ : List[Any] = self.rust_tokenizer_class.from_pretrained(__lowercase , **__lowercase )
# Simple input
lowerCAmelCase_ : Dict = '''This is a simple input'''
lowerCAmelCase_ : Any = ['''This is a simple input 1''', '''This is a simple input 2''']
lowerCAmelCase_ : Any = ('''This is a simple input''', '''This is a pair''')
lowerCAmelCase_ : Tuple = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(__lowercase , tokenizer_r.encode , __lowercase , max_length=__lowercase , padding='''max_length''' )
# Simple input
self.assertRaises(__lowercase , tokenizer_r.encode_plus , __lowercase , max_length=__lowercase , padding='''max_length''' )
# Simple input
self.assertRaises(
__lowercase , tokenizer_r.batch_encode_plus , __lowercase , max_length=__lowercase , padding='''max_length''' , )
# Pair input
self.assertRaises(__lowercase , tokenizer_r.encode , __lowercase , max_length=__lowercase , padding='''max_length''' )
# Pair input
self.assertRaises(__lowercase , tokenizer_r.encode_plus , __lowercase , max_length=__lowercase , padding='''max_length''' )
# Pair input
self.assertRaises(
__lowercase , tokenizer_r.batch_encode_plus , __lowercase , max_length=__lowercase , padding='''max_length''' , )
def lowercase_ ( self ) -> str:
pass
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ : Any = ReformerTokenizer(__lowercase , keep_accents=__lowercase )
lowerCAmelCase_ : List[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
lowerCAmelCase_ : List[str] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCAmelCase_ : Tuple = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(
__lowercase , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
lowerCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(__lowercase )
self.assertListEqual(
__lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def lowercase_ ( self ) -> Optional[Any]:
return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
@slow
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : str = '''Hello World!'''
lowerCAmelCase_ : str = [1_2_6, 3_2, 2_6_2, 1_5_2, 3_8, 7_2, 2_8_7]
self.assertListEqual(__lowercase , self.big_tokenizer.encode(__lowercase ) )
@slow
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ : Optional[int] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
lowerCAmelCase_ : int = [
1_0_8,
2_6_5,
2_4,
1_1_1,
4,
2_5_8,
1_5_6,
3_5,
2_8,
2_7_5,
3,
2_5_9,
2_9_7,
2_6_0,
8_4,
4,
3_5,
1_1_0,
4_4,
8,
2_5_9,
9_1,
2_6_8,
2_1,
1_1,
2_0_9,
2_7_4,
1_0_9,
2_6_6,
2_7_7,
1_1_7,
8_6,
9_3,
3_1_5,
2_5_8,
2_7_8,
2_5_8,
2_7_7,
2_5_8,
0,
2_5_8,
2_8_8,
2_5_8,
3_1_9,
2_5_8,
0,
2_5_8,
0,
2_5_8,
0,
2_5_8,
0,
2_5_8,
2_8_7,
2_5_8,
3_1_5,
2_5_8,
2_8_9,
2_5_8,
2_7_8,
9_9,
2_6_9,
2_6_6,
2_6_2,
8,
2_5_9,
2_4_1,
4,
2_1_7,
2_3_0,
2_6_8,
2_6_6,
5_5,
1_6_8,
1_0_6,
7_5,
1_9_3,
2_6_6,
2_2_3,
2_7,
4_9,
2_6,
2_8_2,
2_5,
2_6_4,
2_9_9,
1_9,
2_6,
0,
2_5_8,
2_7_7,
1_1_7,
8_6,
9_3,
1_7_6,
1_8_3,
2_7_0,
1_1,
2_6_2,
4_2,
6_1,
2_6_5,
]
self.assertListEqual(__lowercase , self.big_tokenizer.encode(__lowercase ) )
@require_torch
@slow
def lowercase_ ( self ) -> Any:
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
lowerCAmelCase_ : Optional[Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
lowerCAmelCase_ : Optional[int] = ''' '''.join(__lowercase )
lowerCAmelCase_ : Union[str, Any] = self.big_tokenizer.encode_plus(__lowercase , return_tensors='''pt''' )
lowerCAmelCase_ : List[Any] = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
lowerCAmelCase_ : Union[str, Any] = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
lowerCAmelCase_ : List[str] = encoded_sequence['''input_ids'''].shape
lowerCAmelCase_ : Union[str, Any] = ReformerModel(__lowercase )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__lowercase )
model(**__lowercase )
@slow
def lowercase_ ( self ) -> List[Any]:
# fmt: off
lowerCAmelCase_ : int = {'''input_ids''': [[1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 7, 5_1, 2_7_9, 5_8, 7, 7_6, 2_5, 6_9, 2_7_8], [1_4_0, 2_4_3, 2_6_4, 1_3_4, 1_7, 2_6_7, 7_7, 2_6_3, 2_2, 2_6_2, 2_9_7, 2_5_8, 3_0_4, 1_7_7, 2_7_9, 2_6_6, 1_4, 8_9, 1_3, 3_5, 2_6_1, 2_9_9, 2_7_2, 1_3_7, 2_7_5, 2_7_8]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
lowerCAmelCase_ : int = [
'''This is a very simple sentence.''',
'''The quick brown fox jumps over the lazy dog.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=__lowercase , sequences=__lowercase , ) | 619 |
from manim import *
class snake_case__( UpperCAmelCase__ ):
    """Manim scene sketching how checkpoint weights are laid out across CPU,
    GPU, Model and Disk slots and then animated moving onto disk.

    NOTE(review): this file is machine-obfuscated — every assignment target was
    renamed to ``lowerCAmelCase_`` and most call arguments (manim direction
    constants such as UP/RIGHT, colors, groups) were replaced by the undefined
    name ``__lowercase``, while later statements still read the original names
    (``cpu``, ``gpu``, ``model``, ``checkpoint``, ``disk``, ``cpu_target``,
    ``target``, ``key``, ``key_text``, ``step_a``, ``animations``, ...). As
    written this raises NameError at runtime; the comments below describe the
    apparent intent of each step, which cannot be fully reconstructed because
    the direction/color arguments are lost.
    """
    def lowercase_ ( self ) -> Tuple:
        # Building blocks: a full-size memory slot, a smaller "meta" slot (used
        # for the Disk columns at the end), and a borderless fill rectangle used
        # to colour slots.
        lowerCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.25 , width=0.25 )
        lowerCAmelCase_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # Two columns of six memory slots arranged and grouped under a "CPU" label.
        lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Optional[int] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Tuple = Text('''CPU''' , font_size=2_4 )
        lowerCAmelCase_ : Union[str, Any] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__lowercase )
        # A row of four slots grouped under a "GPU" label.
        lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[Any] = Text('''GPU''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        gpu.move_to([-1, -1, 0] )
        self.add(__lowercase )
        # A row of six slots grouped under a "Model" label.
        lowerCAmelCase_ : str = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Dict = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Dict = Text('''Model''' , font_size=2_4 )
        lowerCAmelCase_ : str = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        model.move_to([3, -1.0, 0] )
        self.add(__lowercase )
        # For each model slot, place a small coloured fill on the CPU column
        # (the first target is anchored at the CPU column's bottom-left corner;
        # later ones are chained next to the previous target).
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : int = []
        lowerCAmelCase_ : Dict = []
        for i, rect in enumerate(__lowercase ):
            rect.set_stroke(__lowercase )
            lowerCAmelCase_ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowercase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowercase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=__lowercase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowercase , buff=0.0 )
            self.add(__lowercase )
            model_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase , *__lowercase )
        # A row of six slots grouped under a "Loaded Checkpoint" label.
        lowerCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
        lowerCAmelCase_ : List[str] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = Text('''Loaded Checkpoint''' , font_size=2_4 )
        lowerCAmelCase_ : int = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(__lowercase )
        # Mirror each checkpoint slot with a coloured fill, and a copy placed on
        # the CPU columns (first five on the left column, the rest on the right).
        lowerCAmelCase_ : Optional[Any] = []
        lowerCAmelCase_ : Dict = []
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : str = fill.copy().set_fill(__lowercase , opacity=0.7 )
            target.move_to(__lowercase )
            ckpt_arr.append(__lowercase )
            lowerCAmelCase_ : Union[str, Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(__lowercase )
        self.add(*__lowercase , *__lowercase )
        # Legend: a square "key" box with Pango-markup labels for the two colours.
        lowerCAmelCase_ : Union[str, Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCAmelCase_ : str = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__lowercase , __lowercase )
        lowerCAmelCase_ : str = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
        blue_text.next_to(__lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__lowercase )
        # Caption for the first animation step.
        lowerCAmelCase_ : str = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        # Disk area: two columns of six small "meta" slots under a "Disk" label.
        lowerCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase_ : Any = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : Union[str, Any] = VGroup(*__lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : int = VGroup(__lowercase , __lowercase ).arrange(__lowercase , buff=0 )
        lowerCAmelCase_ : List[str] = Text('''Disk''' , font_size=2_4 )
        lowerCAmelCase_ : Optional[int] = Group(__lowercase , __lowercase ).arrange(__lowercase , buff=0.5 , aligned_edge=__lowercase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(__lowercase , run_time=3 ) , Write(__lowercase , run_time=1 ) , Create(__lowercase , run_time=1 ) )
        # Animate each checkpoint fill shrinking onto its disk slot.
        lowerCAmelCase_ : int = []
        for i, rect in enumerate(__lowercase ):
            lowerCAmelCase_ : int = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(__lowercase , run_time=1.5 ) )
        self.play(*__lowercase )
        self.play(FadeOut(__lowercase ) )
        # Second caption, then fade everything out before ending the scene.
        lowerCAmelCase_ : Union[str, Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__lowercase , run_time=3 ) )
        self.play(
            FadeOut(__lowercase , __lowercase , *__lowercase , *__lowercase ) , )
        self.wait()
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCAmelCase ( class_prompt, class_data_dir, num_class_images )-> None:
    """Download up to ``num_class_images`` regularization images matching
    ``class_prompt`` from the LAION-400M KNN service into ``class_data_dir``.

    Side effects: creates ``<class_data_dir>/images/`` and writes three
    manifests next to it (caption.txt, urls.txt, images.txt).

    NOTE(review): the machine-obfuscated original declared the same parameter
    name three times (a SyntaxError) and bound every value to ``lowerCAmelCase_``
    while later statements read ``client``, ``count``, ``total``, ``pbar``,
    ``images``, ``img`` and three clashing ``fa`` file handles. The names below
    restore the intended data flow without changing any runtime string.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    # Nothing to do if a previous run already fetched enough images.
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return
    # Grow the requested result count geometrically until the service returns
    # enough hits (capped once more than 1e4 images are being requested).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url='https://knn.laion.ai/knn-service',
                indice_name='laion_400m',
                num_images=num_images,
                aesthetic_weight=0.1,
            )
    count = 0
    total = 0
    pbar = tqdm(desc='downloading real regularization images', total=num_class_images)
    with open(f"{class_data_dir}/caption.txt", 'w') as f_captions, open(
        f"{class_data_dir}/urls.txt", 'w'
    ) as f_urls, open(f"{class_data_dir}/images.txt", 'w') as f_paths:
        while total < num_class_images:
            # NOTE(review): `count` can walk past the end of `class_images` when
            # many downloads fail; the resulting IndexError is not handled here
            # (preserved from the original best-effort behavior).
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['url'])
                if img.status_code == 200:
                    # Validate that the payload decodes as an image before keeping it;
                    # a broken payload raises and is skipped by the except below.
                    Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", 'wb') as f:
                        f.write(img.content)
                    f_captions.write(images['caption'] + '\n')
                    f_urls.write(images['url'] + '\n')
                    f_paths.write(f"{class_data_dir}/images/{total}.jpg" + '\n')
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip unreachable or undecodable URLs.
                continue
    return
def lowerCAmelCase ( )-> argparse.Namespace:
    """Build and parse the CLI arguments for the retrieval script.

    Arguments:
        --class_prompt      text prompt to retrieve images (required, str)
        --class_data_dir    path to save images (required, str)
        --num_class_images  number of images to download (int, default 200)

    NOTE(review): the machine-obfuscated original never bound ``parser`` (it
    assigned to ``lowerCAmelCase_``) and passed the undefined name
    ``lowerCAmelCase_`` for ``add_help``/``required``/``type``; restored here to
    the literal values ``False``/``True``/``str``/``int``.
    """
    parser = argparse.ArgumentParser('', add_help=False)
    parser.add_argument('--class_prompt', help='text prompt to retrieve images', required=True, type=str)
    parser.add_argument('--class_data_dir', help='path to save images', required=True, type=str)
    parser.add_argument('--num_class_images', help='number of images to download', default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    # NOTE(review): obfuscation artifacts — `parse_args` and `retrieve` are not
    # defined under these names in this file (both functions above are named
    # `lowerCAmelCase`), and the parsed namespace is bound to `_UpperCAmelCase`
    # while the next line reads `args`. As written this entry point raises
    # NameError; flagged here rather than renaming the public functions.
    _UpperCAmelCase : List[str] =parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
_UpperCAmelCase : Dict =[
(1000, """M"""),
(900, """CM"""),
(500, """D"""),
(400, """CD"""),
(100, """C"""),
(90, """XC"""),
(50, """L"""),
(40, """XL"""),
(10, """X"""),
(9, """IX"""),
(5, """V"""),
(4, """IV"""),
(1, """I"""),
]
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
    """Convert a Roman-numeral string (e.g. ``"XIV"``) to its integer value.

    Applies the subtractive rule: when a symbol is worth less than its
    successor (as in ``"IV"``), the pair contributes ``successor - symbol``.
    An empty string yields 0. Input validity is not checked.

    NOTE(review): the machine-obfuscated original never read its parameter —
    the body referenced the undefined names ``roman``/``vals``/``total``/
    ``place``; the bindings below restore the intended data flow while keeping
    the public signature unchanged.
    """
    roman = lowerCAmelCase_
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
    total = 0
    place = 0
    while place < len(roman):
        # Subtractive pair, e.g. "IX": add 10 - 1 and consume both symbols.
        if place + 1 < len(roman) and vals[roman[place]] < vals[roman[place + 1]]:
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def lowerCAmelCase ( lowerCAmelCase_ )-> str:
    """Convert a positive integer to its canonical Roman-numeral string.

    Greedily consumes the module-level ``ROMAN`` table of descending
    ``(value, symbol)`` pairs; stops as soon as the remainder hits zero.

    NOTE(review): the machine-obfuscated original never read its parameter and
    referenced the undefined names ``number``/``result``/``factor``; bindings
    restored below. This def also shadows the roman→int converter above, which
    shares the obfuscated name ``lowerCAmelCase`` — a file-level artifact not
    fixable here without renaming public functions.
    """
    number = lowerCAmelCase_
    result = []
    for arabic, roman in ROMAN:
        # factor = how many times this symbol fits; number = remainder.
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
    import doctest

    # Run any doctests in this module; as written the converters above define
    # no doctest examples, so this is effectively an import-time smoke check.
    doctest.testmod()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.