| code (string, 86-54.5k chars) | code_codestyle (int64, 0-371) | style_context (string, 87-49.2k chars) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
)


enable_full_determinism()

class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass

@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
| 23 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase_ ( snake_case ):
@staticmethod
@abstractmethod
def _lowerCamelCase ( UpperCamelCase_ ) -> Union[str, Any]:
raise NotImplementedError()
@abstractmethod
def _lowerCamelCase ( self ) -> str:
raise NotImplementedError()
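

# Illustrative subclass (an addition for clarity, not part of the original module):
# a minimal concrete command following the registration pattern the abstract base
# above implies. The "hello" command and its flag are hypothetical examples.
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # `parser` is the subparsers action the CLI entry point passes in
        hello_parser = parser.add_parser("hello")
        hello_parser.add_argument("--name", type=str, default="world")
        hello_parser.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name):
        self.name = name

    def run(self):
        print(f"hello, {self.name}")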
| 249 | 0 |
import argparse

import torch

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """
    Copy/paste/tweak an s3prl downstream checkpoint's weights into the transformers design.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 164 |
from unittest.mock import patch

import pyspark

from datasets.packaged_modules.spark.spark import (
    Spark,
    SparkExamplesIterable,
    _generate_iterable_examples,
)

from ..utils import (
    require_dill_gt_0_3_2,
    require_not_windows,
)


def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16
    # means that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
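

# Note (illustrative, inferred only from the expectations asserted above, not from
# datasets' actual implementation): sharding behaves like a strided split of Spark
# partitions across workers, i.e. worker k of n takes partitions [k::n]. With 4
# partitions and 2 workers:
#
#     partitions = list(range(4))
#     partitions[0::2]  # -> [0, 2] (worker 0)
#     partitions[1::2]  # -> [1, 3] (worker 1)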
| 164 | 1 |
"""simple docstring"""
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> float:
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(1_0_0, 0.2_5) = }''')
print(f'''{price_plus_tax(1_2_5.5_0, 0.0_5) = }''')
| 255 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( SCREAMING_SNAKE_CASE__, unittest.TestCase ):
_lowerCamelCase = CLIPTokenizer
_lowerCamelCase = CLIPTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = {}
_lowerCamelCase = False
def lowercase ( self : Tuple ) -> int:
super().setUp()
# fmt: off
lowercase : Dict = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowercase : List[Any] = dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) )
lowercase : List[str] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
lowercase : Union[str, Any] = {'unk_token': '<unk>'}
lowercase : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
lowercase : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase ) )
def lowercase ( self : Dict, **lowerCAmelCase : Optional[Any] ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase )
def lowercase ( self : Optional[Any], **lowerCAmelCase : Tuple ) -> str:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **lowerCAmelCase )
def lowercase ( self : Optional[Any], lowerCAmelCase : List[Any] ) -> Optional[Any]:
lowercase : int = 'lower newer'
lowercase : str = 'lower newer'
return input_text, output_text
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
lowercase : str = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
lowercase : Union[str, Any] = 'lower newer'
lowercase : List[str] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
lowercase : List[str] = tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
lowercase : int = tokens + [tokenizer.unk_token]
lowercase : Optional[int] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ), lowerCAmelCase )
@require_ftfy
def lowercase ( self : Tuple ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase : List[str] = self.tokenizer_class.from_pretrained(lowerCAmelCase, **lowerCAmelCase )
lowercase : Dict = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase, **lowerCAmelCase )
lowercase : Optional[int] = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
lowercase : int = tokenizer_s.tokenize(lowerCAmelCase )
lowercase : Any = tokenizer_r.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
lowercase : Optional[int] = 'xa\u0303y' + ' ' + 'x\xe3y'
lowercase : int = tokenizer_s.tokenize(lowerCAmelCase )
lowercase : Optional[Any] = tokenizer_r.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
# Test that the tokenization is identical on unicode of space type
lowercase : Any = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
lowercase : Optional[Any] = tokenizer_s.tokenize(lowerCAmelCase )
lowercase : Union[str, Any] = tokenizer_r.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
# Test that the tokenization is identical on unicode of line break type
lowercase : Optional[Any] = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
lowercase : str = tokenizer_s.tokenize(lowerCAmelCase )
lowercase : str = tokenizer_r.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
def lowercase ( self : Any ) -> List[Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
lowercase : Union[str, Any] = f'''{text_of_1_token} {text_of_1_token}'''
lowercase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase, use_fast=lowerCAmelCase, )
lowercase : Dict = tokenizer_r(lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, add_special_tokens=lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0], (0, len(lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1], (len(lowerCAmelCase ) + 1, len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )), )
lowercase : Tuple = f''' {text}'''
lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase, use_fast=lowerCAmelCase, )
lowercase : Dict = tokenizer_r(lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, add_special_tokens=lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(lowerCAmelCase ) + 1, 1 + len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )), )
def lowercase ( self : Dict ) -> List[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(lowerCAmelCase ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def lowercase ( self : List[Any] ) -> str:
super().test_tokenization_python_rust_equals()
def lowercase ( self : Dict ) -> Tuple:
# CLIP always lower cases letters
pass
| 255 | 1 |
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
_UpperCAmelCase : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
_UpperCAmelCase : List[Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 370 |
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance


AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the shortest distance along the surface of an ellipsoid between two
    points on the surface of earth, given longitudes and latitudes in degrees,
    using Lambert's formula.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute the central angle between the two points,
    # using haversine theta: sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma / 2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma / 2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
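

# Illustrative usage (the coordinates are assumptions chosen for demonstration,
# roughly San Francisco and Yosemite Valley; the result is on the order of 254 km):
#
#     print(lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521))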
| 349 | 0 |
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from an ONNX model to reduce its size, and
    saves the optimized model next to the original one.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]

                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
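

# Sketch of how this helper would be called (the path below is a placeholder):
#
#     optimized_path = remove_dup_initializers("/path/to/model.onnx")
#     print("optimized model written to", optimized_path)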
| 337 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
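

# A possible invocation (the script name and flag names are assumptions based on
# transformers' benchmark arguments, not verified against a specific version):
#
#     python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128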
| 337 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()

class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(torch_device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
| 126 | """simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=3, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True
    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device,
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device),
        )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 126 | 1 |
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char; the result must be kept
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 21 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")


class QueueByTwoStacks(Generic[_T]):
    """Queue (FIFO) implemented with two LIFO stacks."""

    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # Bind the methods once to reduce attribute look-ups in the loop.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
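    # Usage sketch (illustrative): items come back in FIFO order even though
    # both internal containers are LIFO stacks.
    queue = QueueByTwoStacks([10, 20])
    queue.put(30)
    assert queue.get() == 10
    assert queue.get() == 20
    assert queue.get() == 30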
| 241 | 0 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from its dataset card (README.md)"""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from a YAML string"""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
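# Round-trip sketch (illustrative; the metadata below is made up for the example):
#
#   card = "---\nlicense: mit\n---\n# My dataset\n"
#   meta = DatasetMetadata.from_yaml_string(_split_yaml_from_readme(card)[0])
#   assert meta["license"] == "mit"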
__SCREAMING_SNAKE_CASE = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 256 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""",
datefmt="""%Y-%m-%d %H:%M:%S""",
level=os.environ.get("""LOGLEVEL""", """INFO""").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}


def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams", type=int, default=None, help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device", type=str, default="cpu", help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
    args = parser.parse_args()
    return args


def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)
        summary_ids = model.generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], num_beams=num_beams, max_length=max_length, early_stopping=True, decoder_start_token_id=model.config.decoder_start_token_id,
        )
        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )
        logger.info("Model exported to {}".format(onnx_file_path))
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")


def main():
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"
    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)


if __name__ == "__main__":
    main()
| 256 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 314 |
def solution(limit: int = 1_00_00_00) -> int:
    """Sieve the primes up to `limit`, then accumulate Euler's totient via the
    product formula phi(n) = n * prod(1 - 1/p) over the prime divisors p of n."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F"{solution() = }")
| 314 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves so that every node of the tree holds
    exactly one coin; each move transfers one coin along one edge."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
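    # Worked example (illustrative): three coins on the root, none on the leaves.
    # One coin must travel down each edge, so exactly two moves are required.
    example_tree = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(example_tree) == 2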
| 24 |
"""simple docstring"""
def solution() -> int:
    """Return the product a*b*c of the unique Pythagorean triplet with a + b + c == 1000."""
    return [
        a * b * (1_000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1_000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'{solution() = }')
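    # Known result (Project Euler 9): the triplet is (200, 375, 425), since
    # 200**2 + 375**2 == 425**2 and 200 + 375 + 425 == 1000.
    assert solution() == 200 * 375 * 425 == 31_875_000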
| 24 | 1 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Get the writer_batch_size that defines the maximum row group size in the parquet files.
    Images and audio are stored as bytes, so smaller row groups keep random access cheap."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
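# Example (illustrative): a dataset with an Image column gets the smaller image
# row group size, while a plain text dataset keeps the default:
#
#   assert get_writer_batch_size(Features({"img": Image()})) == config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS
#   assert get_writer_batch_size(Features({"text": Value("string")})) is None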
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class ParquetDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
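# Usage sketch (illustrative, commented out to avoid side effects at import time):
#
#   ds = Dataset.from_dict({"text": ["a", "b"]})
#   n_bytes = ParquetDatasetWriter(ds, "out.parquet").write()  # returns bytes written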
| 340 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->", end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 340 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 207 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
| 207 | 1 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
SCREAMING_SNAKE_CASE :int = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
SCREAMING_SNAKE_CASE :List[Any] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
SCREAMING_SNAKE_CASE :Any = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
SCREAMING_SNAKE_CASE :Optional[int] = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
SCREAMING_SNAKE_CASE :List[str] = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
SCREAMING_SNAKE_CASE :List[Any] = [
("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""),
("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""),
("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""),
("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""),
("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""),
("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""),
("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""),
("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""),
("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForObjectDetection"""),
(
"""zero-shot-object-detection""",
"""MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""",
"""AutoModelForZeroShotObjectDetection""",
),
("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""),
("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""),
("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""),
("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""),
(
"""table-question-answering""",
"""MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForTableQuestionAnswering""",
),
("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""),
("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""),
(
"""next-sentence-prediction""",
"""MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""",
"""AutoModelForNextSentencePrediction""",
),
(
"""audio-frame-classification""",
"""MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForAudioFrameClassification""",
),
("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""),
(
"""document-question-answering""",
"""MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForDocumentQuestionAnswering""",
),
(
"""visual-question-answering""",
"""MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForVisualQuestionAnswering""",
),
("""image-to-text""", """MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""),
(
"""zero-shot-image-classification""",
"""MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForZeroShotImageClassification""",
),
("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", """AutoModelForDepthEstimation"""),
("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""),
("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""),
]
def camel_case_split(identifier):
    """Split a camel-cased name into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def get_frameworks_table():
    """Generates a dataframe with the supported frameworks for each model type."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()
    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"
    data["processor"] = [processors[t] for t in all_models]
    return pd.DataFrame(data)
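# Example (illustrative): the boundary-only regex splits on case changes without
# consuming characters, so acronym prefixes stay grouped:
#
#   camel_case_split("TFBertModel") == ["TF", "Bert", "Model"]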
def update_pipeline_and_auto_class_table(table):
    modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table


def update_metadata(token, commit_sha):
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)
    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))
        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"
        upload_folder(
            repo_id="huggingface/transformers-metadata", folder_path=tmp_dir, repo_type="dataset", token=token, commit_message=commit_message,
        )
def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)
    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
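For context, the script above ships its tables by round-tripping pandas frames through `datasets.Dataset`; a minimal sketch of that mechanism (the toy column values are illustrative only):
import pandas as pd
from datasets import Dataset

toy = pd.DataFrame(
    {"model_type": ["bert"], "pytorch": [True], "tensorflow": [True], "flax": [True], "processor": ["AutoTokenizer"]}
)
Dataset.from_pandas(toy).to_json("frameworks.json")  # one JSON object per line, as uploaded above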
| 15 |
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class Stack(Generic[T]):
    """LIFO stack backed by a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
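A short usage sketch of the stack above:
stack = Stack[int]()
for value in (1, 2, 3):
    stack.push(value)
print(stack)         # 3->2->1
print(stack.peek())  # 3
print(stack.pop())   # 3
print(len(stack))    # 2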
| 128 | 0 |
"""simple docstring"""
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]
        if len(sources) == 0 or len(sinks) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]
        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)
            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
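A second, hand-checkable network for the solver above: a diamond graph with two vertex-disjoint paths, so the expected maximum flow is the sum of their bottlenecks.
# Paths 0->1->3 (bottleneck 3) and 0->2->3 (bottleneck 2): max flow 5.
diamond = [
    [0, 3, 2, 0],
    [0, 0, 0, 3],
    [0, 0, 0, 2],
    [0, 0, 0, 0],
]
network = FlowNetwork(diamond, [0], [3])
network.set_maximum_flow_algorithm(PushRelabelExecutor)
assert network.find_maximum_flow() == 5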
| 362 |
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence` in place between indices `start` and `end` (inclusive)."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
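Example run (slowsort is the deliberately inefficient "multiply and surrender" algorithm, so keep inputs small):
data = [5, 3, 8, 1, 9, 2]
slowsort(data)
print(data)  # [1, 2, 3, 5, 8, 9]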
| 234 | 0 |
def xnor_gate(input_1: int, input_2: int) -> int:
    """Return 1 if both binary inputs are equal, else 0 (logical XNOR)."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    """Exhaustive truth-table check of the XNOR gate."""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
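For single-bit inputs, XNOR is the complement of XOR; an equivalent bitwise formulation:
def xnor_gate_bitwise(input_1: int, input_2: int) -> int:
    # ^ is XOR; inverting the low bit gives XNOR
    return (input_1 ^ input_2) ^ 1


for a in (0, 1):
    for b in (0, 1):
        assert xnor_gate(a, b) == xnor_gate_bitwise(a, b)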
| 15 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class A__ ( nn.Module ):
def __init__( self : Any , _UpperCAmelCase : nn.Module , _UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
super().__init__()
__lowercase = module
__lowercase = nn.Sequential(
nn.Linear(module.in_features , _UpperCAmelCase , bias=_UpperCAmelCase ) , nn.Linear(_UpperCAmelCase , module.out_features , bias=_UpperCAmelCase ) , )
__lowercase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=_UpperCAmelCase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def a__ ( self : str , _UpperCAmelCase : List[str] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.module(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) + self.adapter(_UpperCAmelCase )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowerCAmelCase__ : int = "bigscience/bloom-1b7"
# Constant values
lowerCAmelCase__ : Any = 2.109659552692574
lowerCAmelCase__ : str = "Hello my name is"
lowerCAmelCase__ : Any = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCAmelCase__ : List[Any] = 10
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained(self.model_name )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Models and tokenizer
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def a__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : str ) -> int:
"""simple docstring"""
__lowercase = self.model_abit.config
self.assertTrue(hasattr(_UpperCAmelCase , 'quantization_config' ) )
__lowercase = config.to_dict()
__lowercase = config.to_diff_dict()
__lowercase = config.to_json_string()
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
__lowercase = self.model_fpaa.get_memory_footprint()
__lowercase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__lowercase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_UpperCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
__lowercase = True
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
with self.assertRaises(_UpperCAmelCase ):
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_fpaa.to(torch.floataa )
__lowercase = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__lowercase = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__lowercase = self.model_fpaa.half()
# Check this does not throw an error
__lowercase = self.model_fpaa.float()
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_UpperCAmelCase , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
@classmethod
def a__ ( cls : int ) -> Tuple:
"""simple docstring"""
__lowercase = 't5-small'
__lowercase = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__lowercase = AutoTokenizer.from_pretrained(cls.model_name )
__lowercase = 'Translate in German: Hello, my dog is cute'
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : int ) -> int:
"""simple docstring"""
from transformers import TaForConditionalGeneration
__lowercase = TaForConditionalGeneration._keep_in_fpaa_modules
__lowercase = None
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
__lowercase = modules
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
super().setUp()
# model_name
__lowercase = 'bigscience/bloom-560m'
__lowercase = 't5-small'
# Different types of model
__lowercase = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Sequence classification model
__lowercase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# CausalLM model
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Seq2seq model
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__lowercase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__lowercase = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 'facebook/opt-350m'
super().setUp()
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__lowercase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__lowercase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_UpperCAmelCase ) ):
__lowercase = LoRALayer(module.q_proj , rank=16 )
__lowercase = LoRALayer(module.k_proj , rank=16 )
__lowercase = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__lowercase = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__lowercase = model.forward(**_UpperCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_UpperCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Any = "gpt2-xl"
lowerCAmelCase__ : str = 3.3191854854152187
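For orientation, the public API these quantization tests exercise reduces to a few lines; a minimal 4-bit loading sketch (the model id is only an example, and a CUDA GPU plus `bitsandbytes` are required):
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-1b7", quantization_config=quant_config, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
inputs = tokenizer("Hello my name is", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(output[0], skip_special_tokens=True))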
| 325 | 0 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow LXMERT checkpoint into a PyTorch model file."""
    # Initialise PyTorch model from the json config
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 362 |
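Called programmatically rather than via the CLI, the conversion script above looks like this (all paths are placeholders):
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/lxmert/model.ckpt",     # placeholder
    config_file="/path/to/lxmert/config.json",           # placeholder
    pytorch_dump_path="/path/to/out/pytorch_model.bin",  # placeholder
)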
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features):
    """Return the writer batch size that bounds the Parquet row group size, or None if unbounded."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Write the dataset to `file_obj` as Parquet and return the number of bytes written."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
| 329 | 0 |
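The Parquet reader and writer above are what back `Dataset.from_parquet` / `Dataset.to_parquet`; a small round trip:
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds.to_parquet("toy.parquet")                    # goes through ParquetDatasetWriter
reloaded = Dataset.from_parquet("toy.parquet")  # goes through ParquetDatasetReader
assert reloaded["label"] == [0, 1]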
'''simple docstring'''
import os
from pathlib import Path
def load_cuda_kernels():
    """JIT-compile and import the multi-scale deformable attention kernels."""
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
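The same JIT machinery works for any standalone op; a tiny CPU-only sketch using `load_inline` (the op itself is made up for illustration):
import torch
from torch.utils.cpp_extension import load_inline

# Compiles the C++ source at import time and exposes `twice` via auto-generated bindings.
toy_op = load_inline(
    name="toy_op",
    cpp_sources="torch::Tensor twice(torch::Tensor x) { return x + x; }",
    functions=["twice"],
)
assert torch.equal(toy_op.twice(torch.ones(2)), torch.full((2,), 2.0))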
| 56 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class a ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
snake_case_ = AutoencoderKL
snake_case_ = "sample"
snake_case_ = 1e-2
@property
def A_ ( self : Dict ):
snake_case_ = 4
snake_case_ = 3
snake_case_ = (32, 32)
snake_case_ = floats_tensor((batch_size, num_channels) + sizes ).to(lowercase_ )
return {"sample": image}
@property
def A_ ( self : List[Any] ):
return (3, 32, 32)
@property
def A_ ( self : Dict ):
return (3, 32, 32)
def A_ ( self : Union[str, Any] ):
snake_case_ = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
snake_case_ = self.dummy_input
return init_dict, inputs_dict
def A_ ( self : Any ):
pass
def A_ ( self : str ):
pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
def A_ ( self : Dict ):
# enable deterministic behavior for gradient checkpointing
snake_case_ ,snake_case_ = self.prepare_init_args_and_inputs_for_common()
snake_case_ = self.model_class(**lowercase_ )
model.to(lowercase_ )
assert not model.is_gradient_checkpointing and model.training
snake_case_ = model(**lowercase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
snake_case_ = torch.randn_like(lowercase_ )
snake_case_ = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
snake_case_ = self.model_class(**lowercase_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowercase_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
snake_case_ = model_a(**lowercase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
snake_case_ = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
snake_case_ = dict(model.named_parameters() )
snake_case_ = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def A_ ( self : Tuple ):
snake_case_ ,snake_case_ = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(lowercase_ )
snake_case_ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def A_ ( self : Tuple ):
snake_case_ = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
snake_case_ = model.to(lowercase_ )
model.eval()
if torch_device == "mps":
snake_case_ = torch.manual_seed(0 )
else:
snake_case_ = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case_ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case_ = image.to(lowercase_ )
with torch.no_grad():
snake_case_ = model(lowercase_ , sample_posterior=lowercase_ , generator=lowercase_ ).sample
snake_case_ = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
snake_case_ = torch.tensor(
[
-4.0_078e-01,
-3.8_323e-04,
-1.2_681e-01,
-1.1_462e-01,
2.0_095e-01,
1.0_893e-01,
-8.8_247e-02,
-3.0_361e-01,
-9.8_644e-03,
] )
elif torch_device == "cpu":
snake_case_ = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
snake_case_ = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(lowercase_ , lowercase_ , rtol=1e-2 ) )
@slow
class a ( unittest.TestCase ):
def A_ ( self : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] ):
return F"gaussian_noise_s={seed}_shape={'_'.join([str(lowercase_ ) for s in shape] )}.npy"
def A_ ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Dict , lowercase_ : List[Any]=0 , lowercase_ : Union[str, Any]=(4, 3, 512, 512) , lowercase_ : Optional[Any]=False ):
snake_case_ = torch.floataa if fpaa else torch.floataa
snake_case_ = torch.from_numpy(load_hf_numpy(self.get_file_format(lowercase_ , lowercase_ ) ) ).to(lowercase_ ).to(lowercase_ )
return image
def A_ ( self : Any , lowercase_ : Dict="CompVis/stable-diffusion-v1-4" , lowercase_ : List[str]=False ):
snake_case_ = '''fp16''' if fpaa else None
snake_case_ = torch.floataa if fpaa else torch.floataa
snake_case_ = AutoencoderKL.from_pretrained(
lowercase_ , subfolder='''vae''' , torch_dtype=lowercase_ , revision=lowercase_ , )
model.to(lowercase_ ).eval()
return model
def A_ ( self : Any , lowercase_ : int=0 ):
if torch_device == "mps":
return torch.manual_seed(lowercase_ )
return torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def A_ ( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Tuple ):
snake_case_ = self.get_sd_vae_model()
snake_case_ = self.get_sd_image(lowercase_ )
snake_case_ = self.get_generator(lowercase_ )
with torch.no_grad():
snake_case_ = model(lowercase_ , generator=lowercase_ , sample_posterior=lowercase_ ).sample
assert sample.shape == image.shape
snake_case_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
snake_case_ = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(lowercase_ , lowercase_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def A_ ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Dict ):
snake_case_ = self.get_sd_vae_model(fpaa=lowercase_ )
snake_case_ = self.get_sd_image(lowercase_ , fpaa=lowercase_ )
snake_case_ = self.get_generator(lowercase_ )
with torch.no_grad():
snake_case_ = model(lowercase_ , generator=lowercase_ , sample_posterior=lowercase_ ).sample
assert sample.shape == image.shape
snake_case_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
snake_case_ = torch.tensor(lowercase_ )
assert torch_all_close(lowercase_ , lowercase_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def A_ ( self : Tuple , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[int] ):
snake_case_ = self.get_sd_vae_model()
snake_case_ = self.get_sd_image(lowercase_ )
with torch.no_grad():
snake_case_ = model(lowercase_ ).sample
assert sample.shape == image.shape
snake_case_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
snake_case_ = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(lowercase_ , lowercase_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def A_ ( self : Dict , lowercase_ : Tuple , lowercase_ : Optional[int] ):
snake_case_ = self.get_sd_vae_model()
snake_case_ = self.get_sd_image(lowercase_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
snake_case_ = model.decode(lowercase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
snake_case_ = sample[-1, -2:, :2, -2:].flatten().cpu()
snake_case_ = torch.tensor(lowercase_ )
assert torch_all_close(lowercase_ , lowercase_ , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def A_ ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[Any] ):
snake_case_ = self.get_sd_vae_model(fpaa=lowercase_ )
snake_case_ = self.get_sd_image(lowercase_ , shape=(3, 4, 64, 64) , fpaa=lowercase_ )
with torch.no_grad():
snake_case_ = model.decode(lowercase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
snake_case_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
snake_case_ = torch.tensor(lowercase_ )
assert torch_all_close(lowercase_ , lowercase_ , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def A_ ( self : Optional[Any] , lowercase_ : List[str] ):
snake_case_ = self.get_sd_vae_model(fpaa=lowercase_ )
snake_case_ = self.get_sd_image(lowercase_ , shape=(3, 4, 64, 64) , fpaa=lowercase_ )
with torch.no_grad():
snake_case_ = model.decode(lowercase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
snake_case_ = model.decode(lowercase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowercase_ , lowercase_ , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def A_ ( self : Optional[Any] , lowercase_ : Any ):
snake_case_ = self.get_sd_vae_model()
snake_case_ = self.get_sd_image(lowercase_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
snake_case_ = model.decode(lowercase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
snake_case_ = model.decode(lowercase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowercase_ , lowercase_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def A_ ( self : str , lowercase_ : Optional[int] , lowercase_ : Tuple ):
snake_case_ = self.get_sd_vae_model()
snake_case_ = self.get_sd_image(lowercase_ )
snake_case_ = self.get_generator(lowercase_ )
with torch.no_grad():
snake_case_ = model.encode(lowercase_ ).latent_dist
snake_case_ = dist.sample(generator=lowercase_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
snake_case_ = sample[0, -1, -3:, -3:].flatten().cpu()
snake_case_ = torch.tensor(lowercase_ )
snake_case_ = 3e-3 if torch_device != '''mps''' else 1e-2
assert torch_all_close(lowercase_ , lowercase_ , atol=lowercase_ )
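The behaviour under test reduces to the encode/decode round trip below; a minimal sketch (the checkpoint downloads on first use):
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
image = torch.randn(1, 3, 512, 512)
with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample()  # (1, 4, 64, 64): 8x spatial downsampling
    reconstruction = vae.decode(latents).sample       # back to (1, 3, 512, 512)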
| 56 | 1 |
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    """Sort a list in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
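A non-interactive example:
assert gnome_sort([5, 3, 8, 1]) == [1, 3, 5, 8]
assert gnome_sort([]) == []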
| 370 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Dict ):
"""simple docstring"""
A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : Tuple = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : Dict = -1
A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : Any = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
A_ : List[str] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
A_ : List[str] = TextStreamer(_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A_ : Dict = cs.out[:-1]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : Dict = -1
A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : Optional[int] = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
A_ : str = tokenizer.decode(greedy_ids[0] )
A_ : int = TextIteratorStreamer(_lowerCamelCase )
A_ : List[Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
A_ : List[Any] = Thread(target=model.generate , kwargs=_lowerCamelCase )
thread.start()
A_ : List[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : int ):
"""simple docstring"""
A_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : List[str] = -1
A_ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : Tuple = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase )
A_ : Tuple = greedy_ids[:, input_ids.shape[1] :]
A_ : Tuple = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
A_ : Any = TextStreamer(_lowerCamelCase , skip_prompt=_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A_ : Any = cs.out[:-1]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
A_ : Tuple = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowerCamelCase )
A_ : List[Any] = -1
A_ : Union[str, Any] = torch.ones((1, 5) , device=_lowerCamelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
A_ : List[Any] = TextStreamer(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
model.generate(_lowerCamelCase , max_new_tokens=1 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
A_ : List[str] = cs.out[:-1] # Remove the final "\n"
A_ : List[Any] = tokenizer(_lowerCamelCase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
A_ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )
A_ : Union[str, Any] = -1
A_ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase )
A_ : List[str] = TextIteratorStreamer(_lowerCamelCase , timeout=0.0_01 )
A_ : str = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
A_ : List[str] = Thread(target=model.generate , kwargs=_lowerCamelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowerCamelCase ):
A_ : str = ''''''
for new_text in streamer:
streamer_text += new_text
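Outside a test harness, the iterator streamer is typically driven from a background thread; a minimal sketch with the same tiny model:
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer})
thread.start()
for text_chunk in streamer:  # yields decoded text as soon as tokens are generated
    print(text_chunk, end="")
thread.join()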
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__a =BlenderbotConfig
__a ={}
__a ='gelu'
def __init__( self : Dict , __a : str , __a : Dict=13 , __a : List[str]=7 , __a : List[Any]=True , __a : Tuple=False , __a : Union[str, Any]=99 , __a : List[str]=32 , __a : int=2 , __a : Any=4 , __a : str=37 , __a : Tuple=0.1 , __a : Dict=0.1 , __a : int=20 , __a : List[Any]=2 , __a : int=1 , __a : Optional[int]=0 , ):
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = eos_token_id
_a = pad_token_id
_a = bos_token_id
def UpperCamelCase__ ( self : Tuple ):
_a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_a = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_a = tf.concat([input_ids, eos_tensor] , axis=1 )
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_a = prepare_blenderbot_inputs_dict(__a , __a , __a )
return config, inputs_dict
def UpperCamelCase__ ( self : str , __a : int , __a : Tuple ):
_a = TFBlenderbotModel(config=__a ).get_decoder()
_a = inputs_dict["input_ids"]
_a = input_ids[:1, :]
_a = inputs_dict["attention_mask"][:1, :]
_a = inputs_dict["head_mask"]
_a = 1
# first forward pass
_a = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a )
_a , _a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_a = ids_tensor((self.batch_size, 3) , config.vocab_size )
_a = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_a = tf.concat([input_ids, next_tokens] , axis=-1 )
_a = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_a = model(__a , attention_mask=__a )[0]
_a = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_a = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_a = output_from_no_past[:, -3:, random_slice_idx]
_a = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3 )
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : Dict , lowercase : Union[str, Any]=None , lowercase : Optional[int]=None , lowercase : Tuple=None , lowercase : Any=None , lowercase : Union[str, Any]=None , ) -> List[Any]:
if attention_mask is None:
_a = tf.cast(tf.math.not_equal(lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_a = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_a = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_a = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_a = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =(TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
__a =(TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
__a =(
{
'conversational': TFBlenderbotForConditionalGeneration,
'feature-extraction': TFBlenderbotModel,
'summarization': TFBlenderbotForConditionalGeneration,
'text2text-generation': TFBlenderbotForConditionalGeneration,
'translation': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
__a =True
__a =False
__a =False
def UpperCamelCase__ ( self : Tuple ):
_a = TFBlenderbotModelTester(self )
_a = ConfigTester(self , config_class=__a )
def UpperCamelCase__ ( self : int ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self : int ):
_a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
@require_tokenizers
@require_tf
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
__a =['My friends are cool but they eat too many carbs.']
__a ='facebook/blenderbot-400M-distill'
@cached_property
def UpperCamelCase__ ( self : Optional[int] ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase__ ( self : str ):
_a = self.tokenizer(self.src_text , return_tensors="tf" )
_a = self.model.generate(
model_inputs.input_ids , )
_a = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__a )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
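The slow test above corresponds to this plain inference snippet (downloads the 400M checkpoint):
from transformers import BlenderbotTokenizer, TFBlenderbotForConditionalGeneration

tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
model = TFBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
reply_ids = model.generate(inputs.input_ids)
print(tokenizer.batch_decode(reply_ids.numpy(), skip_special_tokens=True)[0])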
| 63 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save the per-example max token lengths of the train/val Seq2SeqDatasets to their `len_file` pickles."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
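A hypothetical invocation (the directory layout is assumed to follow the seq2seq examples: train.source/train.target and val.source/val.target files):
# Writes the pickled length lists to the datasets' `len_file` paths.
save_len_file("t5-small", "path/to/data_dir")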
| 63 | 1 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
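A worked check against PV = nRT, for 1 mol at 300 K in 0.0224 m³:
# P = nRT / V = 1 * 8.314462 * 300 / 0.0224 ≈ 111,354 Pa
print(f"{pressure_of_gas_system(1.0, 300.0, 0.0224):.0f} Pa")  # -> 111354 Pa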
| 324 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        config_dict = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**config_dict)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))
        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
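# A minimal sketch of how this common tester is usually wired into a model-specific
# test file; BertConfig is an illustrative stand-in for any concrete config class.
if __name__ == "__main__":
    import unittest

    from transformers import BertConfig

    class BertConfigTest(unittest.TestCase):
        def setUp(self):
            self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)

        def test_config(self):
            self.config_tester.run_common_tests()

    unittest.main()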
| 324 | 1 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
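# Hedged usage sketch of the pipeline under test; the model id is the same tiny test
# checkpoint exercised above, and the printed scores are illustrative, not asserted.
if __name__ == "__main__":
    video_classifier = pipeline(
        "video-classification", model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
    )
    video = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
    print(video_classifier(video, top_k=2))  # e.g. [{'score': ..., 'label': 'LABEL_0'}, ...]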
| 84 |
'''simple docstring'''
from math import isqrt
def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
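# Sanity check of the stepping logic above: consecutive cube differences satisfy
# (n + 1)**3 - n**3 == 3*n*n + 3*n + 1, which is exactly the sequence produced by
# starting at 7 and repeatedly adding 6 * cube_index.
def _check_cube_differences(limit: int = 10) -> None:
    candidate, cube_index = 7, 1
    for n in range(1, limit):
        assert candidate == (n + 1) ** 3 - n**3
        cube_index += 1
        candidate += 6 * cube_index


_check_cube_differences()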
| 185 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
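# Illustrative note: with this pattern, importing the package stays cheap; torch and the
# modeling file are only loaded when an attribute that needs them is first accessed, e.g.
#   from transformers.models.time_series_transformer import TimeSeriesTransformerConfig
# resolves through _LazyModule.__getattr__ on first use.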
| 350 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : int = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {
'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1_024,
        tag_pad_id=216,
        subs_pad_id=1_001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
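# Hedged usage sketch: the defaults above reproduce markuplm-base-sized settings, and
# any keyword can be overridden at construction time.
if __name__ == "__main__":
    config = MarkupLMConfig(max_depth=64)
    print(config.hidden_size, config.max_depth)  # 768 64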
| 331 | 0 |
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (factorial digit sum)."""
    return sum(int(digit) for digit in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
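# Quick check against a value small enough to verify by hand:
# 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.
assert solution(10) == 27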
| 295 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
UpperCamelCase__ : int = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 0 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
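# Usage sketch: for [[4, 7], [2, 6]] the determinant is 10, so the inverse is
# [[0.6, -0.7], [-0.2, 0.4]] (multiplying back yields the identity matrix).
if __name__ == "__main__":
    print(inverse_of_matrix([[4.0, 7.0], [2.0, 6.0]]))  # [[0.6, -0.7], [-0.2, 0.4]]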
| 350 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __snake_case ( unittest.TestCase):
"""simple docstring"""
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
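# Hedged usage sketch outside the test harness: the helper answers whether every
# non-safetensors weight file in a repo listing has a safetensors counterpart.
if __name__ == "__main__":
    print(is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"]))  # False
    print(
        is_safetensors_compatible(
            ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
        )
    )  # True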
| 89 | 0 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Apply the renaming pattern used by the small conversion scripts."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
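# Small self-contained check of the shard-size arithmetic used above; both helpers
# are the real ones imported at the top of this script (not executed automatically).
def _shard_math_check():
    assert convert_file_size_to_int("10GB") == 10 * 10**9
    tensor = torch.zeros(1_000_000)  # float32, 4 bytes per element
    assert tensor.numel() * dtype_byte_size(tensor.dtype) == 4_000_000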
| 227 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Dict = logging.get_logger(__name__)
__A : Union[str, Any] = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2_048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
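# Hedged usage sketch: a config wired for a higher masking ratio; unspecified fields
# keep the vit-mae-base defaults above.
if __name__ == "__main__":
    config = ViTMAEConfig(mask_ratio=0.9)
    print(config.mask_ratio, config.decoder_hidden_size)  # 0.9 512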
| 33 | 0 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
@slow
    def test_tokenizer_integration(self):
        sequences = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
        expected_encoding = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| 166 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeqaSeqTrainer(SeqaSeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
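# A hedged construction sketch; the model/dataset/metric objects below are placeholders
# rather than part of this module. The two extra hooks are what differentiate this
# trainer from the stock SeqaSeqTrainer.
#
# trainer = QuestionAnsweringSeqaSeqTrainer(
#     model=model,
#     args=training_args,
#     train_dataset=train_dataset,
#     eval_dataset=eval_dataset,
#     eval_examples=eval_examples,  # raw examples kept around for post-processing
#     post_process_function=post_processing_function,
#     compute_metrics=compute_metrics,
# )
# metrics = trainer.evaluate(max_length=64, num_beams=4)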
| 166 | 1 |
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet weights can exceed the 2GB protobuf limit
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(
                    1,
                    clip_num_channels,
                    clip_image_size,
                    clip_image_size,
                ).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )

    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
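    # Example invocation (paths and model id are illustrative):
    #   python convert_stable_diffusion_checkpoint_to_onnx.py \
    #       --model_path runwayml/stable-diffusion-v1-5 \
    #       --output_path ./sd_onnx --opset 14 --fp16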
| 36 |
'''simple docstring'''
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
| 28 | 0 |
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(' ', end='')
        for _ in range(0, i + 1):  # printing stars
            print('* ', end='')
        print()


# Function to print lower half of diamond (pyramid)
def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print('* ', end='')
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(' ', end='')


# Function to print the complete diamond, or a notice for non-positive input
def pretty_print(n):
    if n <= 0:
        print(' ... .... nothing printing :(')
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(r'| /\ | |- | |- |--| |\ /| |-')
print(r'|/ \| |- |_ |_ |__| | \/ | |_')
snake_case_ : Tuple = 1
while K:
snake_case_ : Tuple = int(input('enter the number and , and see the magic : '))
print()
pretty_print(user_number)
snake_case_ : Dict = int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...')
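# Added illustration (my addition, not in the original snippet): for
# pretty_print(3) the program draws a diamond like this (trailing spaces
# omitted):
#
#   *
#  * *
# * * *
# * * *
#  * *
#   *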
| 363 |
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Find the maximum-sum subarray of arr[low..high] by divide and conquer."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Find the maximum-sum subarray that crosses the midpoint."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
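    # Added sanity check (my addition): the classic CLRS example array; the
    # maximum subarray is arr[7:11] = [18, 20, -7, 12] with sum 43.
    clrs_example = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
    assert max_subarray(clrs_example, 0, len(clrs_example) - 1) == (7, 10, 43)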
| 236 | 0 |
def solution(n: int = 1000) -> int:
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 52 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowercase (unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = 0
@slow
def _lowerCamelCase ( self ):
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(snake_case__ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(snake_case__ ) , 0 )
def _lowerCamelCase ( self ):
'''simple docstring'''
tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _lowerCamelCase ( self ):
'''simple docstring'''
tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
self.assertIsInstance(tokenizer , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def _lowerCamelCase ( self ):
'''simple docstring'''
config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER )
self.assertIsInstance(config , RobertaConfig )
# Check that tokenizer_type ≠ model_type
tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER , config=config )
self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _lowerCamelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(snake_case__ , "vocab.txt" ) )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type="bert" , use_fast=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(snake_case__ , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(snake_case__ , "merges.txt" ) )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type="gpt2" , use_fast=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(snake_case__ , "vocab.txt" ) )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type="bert" )
self.assertIsInstance(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(snake_case__ , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(snake_case__ , "merges.txt" ) )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type="gpt2" )
self.assertIsInstance(snake_case__ , snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
with pytest.raises(snake_case__ ):
AutoTokenizer.from_pretrained("./" , tokenizer_type="xxx" )
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCamelCase_ = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased" )
self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) )
if isinstance(snake_case__ , snake_case__ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , snake_case__ )
else:
self.assertEqual(tokenizer.do_lower_case , snake_case__ )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
snake_case__ , "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" , ):
UpperCamelCase_ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists" )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = TOKENIZER_MAPPING.values()
UpperCamelCase_ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(snake_case__ )
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=snake_case__ ) , snake_case__ )
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ) , snake_case__ )
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = AutoTokenizer.from_pretrained("distilbert-base-uncased" , do_lower_case=snake_case__ )
UpperCamelCase_ = "Hello, world. How are you?"
UpperCamelCase_ = tokenizer.tokenize(snake_case__ )
self.assertEqual("[UNK]" , tokens[0] )
UpperCamelCase_ = AutoTokenizer.from_pretrained("microsoft/mpnet-base" , do_lower_case=snake_case__ )
UpperCamelCase_ = tokenizer.tokenize(snake_case__ )
self.assertEqual("[UNK]" , tokens[0] )
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config" )
self.assertEqual(type(snake_case__ ) , snake_case__ )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 3_0000 )
self.assertEqual(tokenizer.unk_token , "[UNK]" )
self.assertEqual(tokenizer.padding_side , "right" )
self.assertEqual(tokenizer.truncation_side , "right" )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = AutoTokenizer.from_pretrained("ctrl" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(snake_case__ , snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = get_tokenizer_config("bert-base-cased" )
UpperCamelCase_ = config.pop("_commit_hash" , snake_case__ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(snake_case__ , {"do_lower_case": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCamelCase_ = get_tokenizer_config(snake_case__ )
self.assertDictEqual(snake_case__ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCamelCase_ = get_tokenizer_config(snake_case__ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["tokenizer_class"] , "BertTokenizer" )
def _lowerCamelCase ( self ):
'''simple docstring'''
try:
AutoConfig.register("custom" , snake_case__ )
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
UpperCamelCase_ = CustomTokenizer.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
try:
AutoConfig.register("custom" , snake_case__ )
# Can register in two steps
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(snake_case__ , fast_tokenizer_class=snake_case__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
snake_case__ , slow_tokenizer_class=snake_case__ , fast_tokenizer_class=snake_case__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoTokenizer.register(snake_case__ , fast_tokenizer_class=snake_case__ )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = BertTokenizerFast.from_pretrained(snake_case__ )
bert_tokenizer.save_pretrained(snake_case__ )
UpperCamelCase_ = CustomTokenizerFast.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , use_fast=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaises(snake_case__ ):
UpperCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case__ ):
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ )
UpperCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , trust_remote_code=snake_case__ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
class _lowercase (a_ ):
'''simple docstring'''
lowercase__ = False
class _lowercase (a_ ):
'''simple docstring'''
lowercase__ = NewTokenizer
lowercase__ = False
try:
AutoConfig.register("custom" , snake_case__ )
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
AutoTokenizer.register(snake_case__ , fast_tokenizer_class=snake_case__ )
# If remote code is not set, the default is to use local
UpperCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
UpperCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertTrue(tokenizer.special_attribute_present )
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=snake_case__ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def _lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
snake_case__ , "bert-base is not a local folder and is not a valid model identifier" ):
UpperCamelCase_ = AutoTokenizer.from_pretrained("bert-base" )
def _lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
snake_case__ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , revision="aaaaaa" )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
UpperCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
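# --- Added illustration (my addition, separate from the tests above) ---
# A minimal sketch of the register API these tests exercise; MyConfig and
# MyTokenizer are hypothetical names, not classes from this file:
#
#   from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer
#
#   class MyConfig(PretrainedConfig):
#       model_type = "my-model"
#
#   class MyTokenizer(PreTrainedTokenizer):
#       ...
#
#   AutoConfig.register("my-model", MyConfig)
#   AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
#   tokenizer = AutoTokenizer.from_pretrained("path/to/my-model")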
| 128 | 0 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2 | 350 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 188 | 0 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
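# Added illustration (my addition): a hypothetical invocation; the script name
# and all three paths are placeholders.
#
#   python convert_ldm_original.py --checkpoint_path model.ckpt \
#       --config_path config.yaml --output_path ./ldm_pipeline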
| 147 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _a :
def __init__(self, SCREAMING_SNAKE_CASE_ = "cpu", SCREAMING_SNAKE_CASE_ = "openai/clip-vit-large-patch14" ) -> None:
UpperCAmelCase_: Optional[Any] = device
UpperCAmelCase_: Optional[Any] = CLIPTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
UpperCAmelCase_: Optional[Any] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
UpperCAmelCase_: Optional[Any] = torchvision.transforms.Normalize(self.image_mean, self.image_std )
UpperCAmelCase_: Tuple = torchvision.transforms.Resize(224 )
UpperCAmelCase_: Any = torchvision.transforms.CenterCrop(224 )
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
UpperCAmelCase_: Dict = self.resize(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = self.center_crop(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = self.normalize(SCREAMING_SNAKE_CASE_ )
return images
def __call__(self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCAmelCase_: Dict = self.tokenizer(text=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = self.preprocess_img(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: int = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _a ( nn.Module ):
def __init__(self, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=0.0_1, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_="image", SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False, ) -> None:
super().__init__()
UpperCAmelCase_: List[Any] = None
UpperCAmelCase_: List[str] = device if device else get_device()
if vqgan:
UpperCAmelCase_: int = vqgan
else:
UpperCAmelCase_: Optional[Any] = load_vqgan(self.device, conf_path=SCREAMING_SNAKE_CASE_, ckpt_path=SCREAMING_SNAKE_CASE_ )
self.vqgan.eval()
if clip:
UpperCAmelCase_: List[str] = clip
else:
UpperCAmelCase_: Any = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
UpperCAmelCase_: Optional[int] = ProcessorGradientFlow(device=self.device )
UpperCAmelCase_: Optional[int] = iterations
UpperCAmelCase_: List[Any] = lr
UpperCAmelCase_: str = log
UpperCAmelCase_: Tuple = make_grid
UpperCAmelCase_: List[str] = return_val
UpperCAmelCase_: Dict = quantize
UpperCAmelCase_: int = self.vqgan.decoder.z_shape
def __snake_case (self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=5, SCREAMING_SNAKE_CASE_=True ) -> List[Any]:
UpperCAmelCase_: Tuple = []
if output_path is None:
UpperCAmelCase_: Optional[int] = """./animation.gif"""
if input_path is None:
UpperCAmelCase_: Tuple = self.save_path
UpperCAmelCase_: List[Any] = sorted(glob(input_path + """/*""" ) )
if not len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(SCREAMING_SNAKE_CASE_ ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
UpperCAmelCase_: Dict = total_duration / len(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: str = [frame_duration] * len(SCREAMING_SNAKE_CASE_ )
if extend_frames:
UpperCAmelCase_: List[str] = 1.5
UpperCAmelCase_: List[Any] = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(SCREAMING_SNAKE_CASE_ ) )
imageio.mimsave(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, duration=SCREAMING_SNAKE_CASE_ )
print(f'gif saved to {output_path}' )
def __snake_case (self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None ) -> Optional[int]:
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
UpperCAmelCase_: List[Any] = preprocess(Image.open(SCREAMING_SNAKE_CASE_ ), target_image_size=256 ).to(self.device )
UpperCAmelCase_: Union[str, Any] = preprocess_vqgan(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ , *UpperCAmelCase_: str = self.vqgan.encode(SCREAMING_SNAKE_CASE_ )
return z
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCAmelCase_: List[Any] = self.latent.detach().requires_grad_()
UpperCAmelCase_: Optional[int] = base_latent + transform_vector
if self.quantize:
UpperCAmelCase_ , *UpperCAmelCase_: Optional[Any] = self.vqgan.quantize(SCREAMING_SNAKE_CASE_ )
else:
UpperCAmelCase_: Tuple = trans_latent
return self.vqgan.decode(SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> List[str]:
UpperCAmelCase_: Any = self.clip_preprocessor(text=SCREAMING_SNAKE_CASE_, images=SCREAMING_SNAKE_CASE_, return_tensors="""pt""", padding=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: str = self.clip(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = clip_outputs.logits_per_image
if weights is not None:
UpperCAmelCase_: Any = similarity_logits * weights
return similarity_logits.sum()
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any:
UpperCAmelCase_: Dict = self._get_clip_similarity(pos_prompts["""prompts"""], SCREAMING_SNAKE_CASE_, weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
UpperCAmelCase_: Tuple = self._get_clip_similarity(neg_prompts["""prompts"""], SCREAMING_SNAKE_CASE_, weights=neg_prompts["""weights"""] )
else:
UpperCAmelCase_: Any = torch.tensor([1], device=self.device )
UpperCAmelCase_: List[str] = -torch.log(SCREAMING_SNAKE_CASE_ ) + torch.log(SCREAMING_SNAKE_CASE_ )
return loss
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
UpperCAmelCase_: Tuple = torch.randn_like(self.latent, requires_grad=SCREAMING_SNAKE_CASE_, device=self.device )
UpperCAmelCase_: str = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
UpperCAmelCase_: Optional[int] = self._add_vector(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = loop_post_process(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: int = self._get_CLIP_loss(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
print("""CLIP loss""", SCREAMING_SNAKE_CASE_ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=SCREAMING_SNAKE_CASE_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
wandb.init(reinit=SCREAMING_SNAKE_CASE_, project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
UpperCAmelCase_: str = Image.open(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = image.resize((256, 256) )
wandb.log("""Original Image""", wandb.Image(SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
if not prompts:
return []
UpperCAmelCase_: Tuple = []
UpperCAmelCase_: str = []
if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase_: Optional[Any] = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(SCREAMING_SNAKE_CASE_, (tuple, list) ):
UpperCAmelCase_: str = prompt[0]
UpperCAmelCase_: List[str] = float(prompt[1] )
elif ":" in prompt:
UpperCAmelCase_ , UpperCAmelCase_: int = prompt.split(""":""" )
UpperCAmelCase_: int = float(SCREAMING_SNAKE_CASE_ )
else:
UpperCAmelCase_: str = prompt
UpperCAmelCase_: Dict = 1.0
processed_prompts.append(SCREAMING_SNAKE_CASE_ )
weights.append(SCREAMING_SNAKE_CASE_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(SCREAMING_SNAKE_CASE_, device=self.device ),
}
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=None, ) -> Optional[Any]:
if image_path:
UpperCAmelCase_: Optional[int] = self._get_latent(SCREAMING_SNAKE_CASE_ )
else:
UpperCAmelCase_: str = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
assert pos_prompts, "You must provide at least one positive prompt."
UpperCAmelCase_: List[Any] = self.process_prompts(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = self.process_prompts(SCREAMING_SNAKE_CASE_ )
if save_final and save_path is None:
UpperCAmelCase_: Optional[int] = os.path.join("""./outputs/""", """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(SCREAMING_SNAKE_CASE_ ):
os.makedirs(SCREAMING_SNAKE_CASE_ )
else:
UpperCAmelCase_: List[str] = save_path + """_""" + get_timestamp()
os.makedirs(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = save_path
UpperCAmelCase_: Optional[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: Tuple = loop_post_process(SCREAMING_SNAKE_CASE_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) ):
if show_intermediate:
show_pil(SCREAMING_SNAKE_CASE_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, f'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({"""Image""": wandb.Image(SCREAMING_SNAKE_CASE_ )} )
if show_final:
show_pil(SCREAMING_SNAKE_CASE_ )
if save_final:
transformed_img.save(os.path.join(self.save_path, f'iter_{iter:03d}_final.png' ) )
| 147 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_autoformer": [
"AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AutoformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_autoformer"] = [
"AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"AutoformerForPrediction",
"AutoformerModel",
"AutoformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 83 |
'''simple docstring'''
import requests
lowercase__ = "" # <-- Put your OpenWeatherMap appid here!
lowercase__ = "https://api.openweathermap.org/data/2.5/"
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ = "Chicago" , SCREAMING_SNAKE_CASE__ = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + '''weather''' , params=locals() ).json()
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ = "Kolkata, India" , SCREAMING_SNAKE_CASE__ = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + '''forecast''' , params=locals() ).json()
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ = 55.68 , SCREAMING_SNAKE_CASE__ = 12.57 , SCREAMING_SNAKE_CASE__ = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + '''onecall''' , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
lowercase__ = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
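# Added note (my addition): a valid OpenWeatherMap API key must be supplied in
# APPID above. Responses are plain dicts, e.g. the current temperature (Kelvin
# by default) is current_weather("Chicago")["main"]["temp"].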
| 83 | 1 |
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
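    # Added sanity check (my addition): the Taylor series should agree with
    # math.sin far below the 10-decimal rounding used above.
    import math

    assert abs(sin(30.0) - math.sin(math.radians(30.0))) < 1e-9  # sin(30 deg) = 0.5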
| 278 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
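# Added usage sketch (my addition): instantiate the config with overridden
# fields and read them back.
#
#   config = ViTConfig(image_size=384, num_hidden_layers=6)
#   assert config.model_type == "vit" and config.image_size == 384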
| 278 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        ) | 222 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite) | 222 | 1 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system (matrix) x = (vector) by Gaussian elimination."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]

        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Return the polynomial interpolating the points (1, y0), (2, y1), ..."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) from the problem statement."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of the optimum polynomials."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1

        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
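    # Added sanity check (my addition): for the cubic u(n) = n**3 from the
    # problem statement, the first incorrect terms of the optimum polynomials
    # are 1, 15 and 58, which sum to 74.
    assert solution(lambda n: n**3, 3) == 74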
| 178 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
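# Added usage sketch (my addition): pairing the processor with the public BLIP
# captioning checkpoint; the image path is a placeholder.
#
#   from PIL import Image
#   from transformers import BlipProcessor
#
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=Image.open("photo.jpg"), text="a photography of", return_tensors="pt")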
| 178 | 1 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Return the number of distinct terms a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)


if __name__ == "__main__":
    print("Number of terms ", solution(int(str(input()).strip())))
| 100 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(self, transformer, vae, scheduler, id2label=None):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size), generator=generator, device=self.device, dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
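
# --- Added usage sketch (not part of the original file). It follows the
# documented diffusers example for the public "facebook/DiT-XL-2-256"
# checkpoint; device and dtype are assumptions, adjust to your environment.
def _demo_dit_pipeline():
    from diffusers import DPMSolverMultistepScheduler

    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    pipe = pipe.to("cuda")

    # map human-readable ImageNet labels to class ids, then sample
    class_ids = pipe.get_label_ids(["white shark", "umbrella"])
    generator = torch.manual_seed(33)
    return pipe(class_labels=class_ids, generator=generator, num_inference_steps=25).images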
| 100 | 1 |
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    # Write without buffering so cursor movements take effect immediately.
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    # Wrap the content in an ANSI color escape sequence.
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
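
# --- Added demo (a sketch, not part of the original file): draw a horizontal
# rule, print a line in green (ANSI color code 32), then move back up one row.
def _demo_cursor_helpers():
    linebreak()
    forceWrite("\n")
    writeColor("selected entry", 32, end="\n")
    move_cursor(1, "UP")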
| 117 |
def solution(pence: int = 200) -> int:
    """Count the ways `pence` can be made from standard British coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
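
# --- Added sanity check (a sketch): with coins {1, 2, 5} usable, 5 pence can
# be made in 4 ways (5; 2+2+1; 2+1+1+1; 1+1+1+1+1), so solution(5) == 4.
def _demo_solution() -> None:
    assert solution(5) == 4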
| 117 | 1 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    # GELU approximated as x * sigmoid(1.702 * x)
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
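
# --- Added usage sketch (not part of the original file): GELU(0) is exactly 0
# and for large positive inputs the output approaches the input itself.
def _demo_gelu() -> None:
    print(gaussian_error_linear_unit(np.array([-1.0, 0.0, 1.0, 10.0])))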
| 368 |
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
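
# --- Added sanity check (a sketch): bitonic sort requires a power-of-two
# length; [12, 42, -21, 1] has length 4 and sorts to [-21, 1, 12, 42].
def _demo_bitonic_sort() -> None:
    data = [12, 42, -21, 1]
    bitonic_sort(data, 0, len(data), 1)
    assert data == [-21, 1, 12, 42]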
| 109 | 0 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
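
# --- Added usage sketch (values are typical silicon doping levels, chosen for
# illustration): V_bi = kT/q * ln(N_D * N_A / n_i**2) is about 0.83 V at 300 K
# for N_D = N_A = 1e17 and n_i = 1e10 (all in cm^-3).
def _demo_builtin_voltage() -> None:
    print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10))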
| 99 |
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    base_str = str(n)
    return len(base_str) == 9 and set(base_str) == set("123456789")


def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num  # concatenated product of base_num with (1, 2)
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num  # concatenated product of base_num with (1, 2, 3)
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(F'''{solution() = }''')
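
# --- Added sanity check (values from the Project Euler 38 statement): the
# concatenated product of 192 with (1, 2, 3) is 192384576, which is pandigital.
def _demo_is_9_pandigital() -> None:
    assert is_9_pandigital(192384576)
    assert not is_9_pandigital(192384577)  # repeated digit, not pandigital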
| 217 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowercase : Dict = logging.get_logger(__name__)
lowercase : Optional[int] = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class DebertaV2Config(PretrainedConfig):
    """simple docstring"""

    model_type = "deberta-v2"

    def __init__(self, vocab_size=128100, hidden_size=1536, num_hidden_layers=24, num_attention_heads=24, intermediate_size=6144, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-7, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu", **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, num_choices=-1, is_pair=False, framework=None, num_channels=3, image_width=40, image_height=40, tokenizer=None) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
return dummy_inputs | 171 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
"""simple docstring"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=32 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=[10, 20, 30, 40] , __UpperCamelCase=[2, 2, 3, 2] , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=["stage2", "stage3", "stage4"] , __UpperCamelCase=3 , __UpperCamelCase=None , ) -> str:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = parent
__UpperCamelCase : List[Any] = batch_size
__UpperCamelCase : Union[str, Any] = image_size
__UpperCamelCase : Any = num_channels
__UpperCamelCase : Union[str, Any] = num_stages
__UpperCamelCase : List[Any] = hidden_sizes
__UpperCamelCase : Optional[Any] = depths
__UpperCamelCase : Dict = is_training
__UpperCamelCase : List[Any] = use_labels
__UpperCamelCase : str = intermediate_size
__UpperCamelCase : int = hidden_act
__UpperCamelCase : Tuple = type_sequence_label_size
__UpperCamelCase : List[Any] = initializer_range
__UpperCamelCase : List[Any] = out_features
__UpperCamelCase : Optional[Any] = num_labels
__UpperCamelCase : Optional[Any] = scope
__UpperCamelCase : List[Any] = num_stages
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase : Optional[Any] = None
if self.use_labels:
__UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase : int = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def __lowerCamelCase ( self ) -> str:
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=__UpperCamelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=__UpperCamelCase , loss_ignore_index=2_55 , num_labels=self.num_labels , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = UperNetForSemanticSegmentation(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
lowercase : Any = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowercase : Dict = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
lowercase : Union[str, Any] = False
lowercase : Tuple = False
lowercase : Optional[int] = False
lowercase : Tuple = False
lowercase : List[str] = False
lowercase : Any = False
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase : Tuple = UperNetModelTester(self )
__UpperCamelCase : str = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
return
def __lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase : Tuple = model_class(__UpperCamelCase )
__UpperCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase : List[str] = [*signature.parameters.keys()]
__UpperCamelCase : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
@unittest.skip(reason="UperNet does not use inputs_embeds" )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not support input and output embeddings" )
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
pass
def __lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
__UpperCamelCase : int = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
__UpperCamelCase : Any = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
__UpperCamelCase : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCamelCase : int = self.model_tester.num_stages
self.assertEqual(len(__UpperCamelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCamelCase , __UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase : Any = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCamelCase : List[str] = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : Tuple = _config_zero_init(__UpperCamelCase )
__UpperCamelCase : int = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
__UpperCamelCase : List[str] = model_class(config=__UpperCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip(reason="UperNet does not have tied weights" )
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
pass
@slow
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : str = UperNetForSemanticSegmentation.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def UpperCAmelCase_ ():
__UpperCamelCase : Union[str, Any] = hf_hub_download(
repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" )
__UpperCamelCase : List[str] = Image.open(_lowerCAmelCase ).convert("RGB" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCamelCase ( self ) -> str:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" )
__UpperCamelCase : Dict = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(__UpperCamelCase )
__UpperCamelCase : Dict = prepare_img()
__UpperCamelCase : Any = processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
with torch.no_grad():
__UpperCamelCase : Any = model(**__UpperCamelCase )
__UpperCamelCase : Tuple = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
__UpperCamelCase : Union[str, Any] = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __UpperCamelCase , atol=1E-4 ) )
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" )
__UpperCamelCase : List[Any] = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(__UpperCamelCase )
__UpperCamelCase : Dict = prepare_img()
__UpperCamelCase : int = processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
with torch.no_grad():
__UpperCamelCase : int = model(**__UpperCamelCase )
__UpperCamelCase : Dict = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
__UpperCamelCase : Union[str, Any] = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __UpperCamelCase , atol=1E-4 ) ) | 171 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=0 , ) -> Dict:
__lowerCamelCase : Any = parent
__lowerCamelCase : Dict = batch_size
__lowerCamelCase : Optional[Any] = seq_length
__lowerCamelCase : Tuple = is_training
__lowerCamelCase : Optional[int] = use_input_mask
__lowerCamelCase : Tuple = use_token_type_ids
__lowerCamelCase : Tuple = use_labels
__lowerCamelCase : Dict = vocab_size
__lowerCamelCase : Union[str, Any] = hidden_size
__lowerCamelCase : Optional[int] = num_hidden_layers
__lowerCamelCase : List[Any] = num_attention_heads
__lowerCamelCase : List[str] = intermediate_size
__lowerCamelCase : Optional[int] = hidden_act
__lowerCamelCase : Union[str, Any] = hidden_dropout_prob
__lowerCamelCase : Optional[int] = attention_probs_dropout_prob
__lowerCamelCase : Optional[int] = max_position_embeddings
__lowerCamelCase : Union[str, Any] = type_vocab_size
__lowerCamelCase : Optional[int] = type_sequence_label_size
__lowerCamelCase : Tuple = initializer_range
__lowerCamelCase : Any = num_labels
__lowerCamelCase : List[str] = num_choices
__lowerCamelCase : str = scope
__lowerCamelCase : List[Any] = projection_dim
def lowercase_ ( self ) -> Dict:
__lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase : Optional[int] = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
__lowerCamelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase : Optional[Any] = None
if self.use_token_type_ids:
__lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase : Union[str, Any] = None
__lowerCamelCase : Dict = None
__lowerCamelCase : str = None
if self.use_labels:
__lowerCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase : Dict = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
__lowerCamelCase : List[str] = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
__lowerCamelCase : Union[str, Any] = TFDPRContextEncoder(config=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
__lowerCamelCase : int = TFDPRQuestionEncoder(config=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
__lowerCamelCase : Optional[int] = TFDPRReader(config=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def lowercase_ ( self ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowerCamelCase : Tuple = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
lowerCamelCase : str = False
lowerCamelCase : str = False
lowerCamelCase : Optional[Any] = False
lowerCamelCase : Any = False
lowerCamelCase : str = False
def lowercase_ ( self ) -> Tuple:
__lowerCamelCase : Tuple = TFDPRModelTester(self )
__lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def lowercase_ ( self ) -> str:
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*SCREAMING_SNAKE_CASE_ )
@slow
def lowercase_ ( self ) -> int:
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Optional[Any] = TFDPRContextEncoder.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : List[str] = TFDPRContextEncoder.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Union[str, Any] = TFDPRQuestionEncoder.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : List[str] = TFDPRReader.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_tf
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self ) -> Any:
__lowerCamelCase : List[Any] = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
__lowerCamelCase : Union[str, Any] = tf.constant(
[[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]] ) # [CLS] hello, is my dog cute? [SEP]
__lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
__lowerCamelCase : Optional[int] = tf.constant(
[
[
0.0_3_2_3_6_2_5_3,
0.1_2_7_5_3_3_3_5,
0.1_6_8_1_8_5_0_9,
0.0_0_2_7_9_7_8_6,
0.3_8_9_6_9_3_3,
0.2_4_2_6_4_9_4_5,
0.2_1_7_8_9_7_1,
-0.0_2_3_3_5_2_2_7,
-0.0_8_4_8_1_9_5_9,
-0.1_4_3_2_4_1_1_7,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 185 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : Dict = ['image_processor']
lowerCamelCase : str = 'SamImageProcessor'
def __init__( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
super().__init__(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = self.image_processor
__lowerCamelCase : str = -10
__lowerCamelCase : List[str] = self.image_processor.size['longest_edge']
def __call__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> BatchEncoding:
__lowerCamelCase : Union[str, Any] = self.image_processor(
SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# pop arguments that are not used in the foward but used nevertheless
__lowerCamelCase : List[Any] = encoding_image_processor['original_sizes']
if hasattr(SCREAMING_SNAKE_CASE_ , 'numpy' ): # Checks if Torch or TF tensor
__lowerCamelCase : Dict = original_sizes.numpy()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = self._check_and_preprocess_points(
input_points=SCREAMING_SNAKE_CASE_ , input_labels=SCREAMING_SNAKE_CASE_ , input_boxes=SCREAMING_SNAKE_CASE_ , )
__lowerCamelCase : Tuple = self._normalize_and_convert(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , input_points=SCREAMING_SNAKE_CASE_ , input_labels=SCREAMING_SNAKE_CASE_ , input_boxes=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , )
return encoding_image_processor
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="pt" , ) -> Optional[int]:
if input_points is not None:
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : Union[str, Any] = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE_ , original_sizes[0] ) for point in input_points
]
else:
__lowerCamelCase : List[Any] = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for point, original_size in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__lowerCamelCase , __lowerCamelCase : Tuple = self._pad_points_and_labels(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = np.array(SCREAMING_SNAKE_CASE_ )
if input_labels is not None:
__lowerCamelCase : Optional[int] = np.array(SCREAMING_SNAKE_CASE_ )
if input_boxes is not None:
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : Union[str, Any] = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE_ , original_sizes[0] , is_bounding_box=SCREAMING_SNAKE_CASE_ )
for box in input_boxes
]
else:
__lowerCamelCase : Optional[int] = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , is_bounding_box=SCREAMING_SNAKE_CASE_ )
for box, original_size in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
]
__lowerCamelCase : Optional[int] = np.array(SCREAMING_SNAKE_CASE_ )
if input_boxes is not None:
if return_tensors == "pt":
__lowerCamelCase : Any = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
# boxes batch size of 1 by default
__lowerCamelCase : Union[str, Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__lowerCamelCase : List[str] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
# boxes batch size of 1 by default
__lowerCamelCase : str = tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__lowerCamelCase : int = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
# point batch size of 1 by default
__lowerCamelCase : Dict = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__lowerCamelCase : Dict = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
# point batch size of 1 by default
__lowerCamelCase : Tuple = tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
__lowerCamelCase : List[Any] = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
# point batch size of 1 by default
__lowerCamelCase : Dict = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__lowerCamelCase : str = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
# point batch size of 1 by default
__lowerCamelCase : Dict = tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
__lowerCamelCase : List[str] = max([point.shape[0] for point in input_points] )
__lowerCamelCase : Union[str, Any] = []
for i, point in enumerate(SCREAMING_SNAKE_CASE_ ):
if point.shape[0] != expected_nb_points:
__lowerCamelCase : Optional[int] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
__lowerCamelCase : List[Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = processed_input_points
return input_points, input_labels
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> np.ndarray:
__lowerCamelCase , __lowerCamelCase : Tuple = original_size
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self.image_processor._get_preprocess_shape(SCREAMING_SNAKE_CASE_ , longest_edge=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = deepcopy(SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ )
if is_bounding_box:
__lowerCamelCase : Optional[int] = coords.reshape(-1 , 2 , 2 )
__lowerCamelCase : List[Any] = coords[..., 0] * (new_w / old_w)
__lowerCamelCase : Dict = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
__lowerCamelCase : Tuple = coords.reshape(-1 , 4 )
return coords
def lowercase_ ( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , ) -> Optional[Any]:
if input_points is not None:
if hasattr(SCREAMING_SNAKE_CASE_ , 'numpy' ): # Checks for TF or Torch tensor
__lowerCamelCase : List[str] = input_points.numpy().tolist()
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not isinstance(input_points[0] , SCREAMING_SNAKE_CASE_ ):
raise ValueError('Input points must be a list of list of floating points.' )
__lowerCamelCase : str = [np.array(SCREAMING_SNAKE_CASE_ ) for input_point in input_points]
else:
__lowerCamelCase : Optional[Any] = None
if input_labels is not None:
if hasattr(SCREAMING_SNAKE_CASE_ , 'numpy' ):
__lowerCamelCase : Union[str, Any] = input_labels.numpy().tolist()
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not isinstance(input_labels[0] , SCREAMING_SNAKE_CASE_ ):
raise ValueError('Input labels must be a list of list integers.' )
__lowerCamelCase : Any = [np.array(SCREAMING_SNAKE_CASE_ ) for label in input_labels]
else:
__lowerCamelCase : Dict = None
if input_boxes is not None:
if hasattr(SCREAMING_SNAKE_CASE_ , 'numpy' ):
__lowerCamelCase : int = input_boxes.numpy().tolist()
if (
not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
or not isinstance(input_boxes[0] , SCREAMING_SNAKE_CASE_ )
or not isinstance(input_boxes[0][0] , SCREAMING_SNAKE_CASE_ )
):
raise ValueError('Input boxes must be a list of list of list of floating points.' )
__lowerCamelCase : List[Any] = [np.array(SCREAMING_SNAKE_CASE_ ).astype(np.floataa ) for box in input_boxes]
else:
__lowerCamelCase : Tuple = None
return input_points, input_labels, input_boxes
@property
def lowercase_ ( self ) -> Dict:
__lowerCamelCase : Any = self.image_processor.model_input_names
return list(dict.fromkeys(SCREAMING_SNAKE_CASE_ ) )
def lowercase_ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
return self.image_processor.post_process_masks(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
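
# --- Added usage sketch (not part of the original file; assumes the public
# "facebook/sam-vit-base" checkpoint and network access) ---
def _demo_sam_processor():
    from PIL import Image
    from transformers import SamProcessor

    processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
    image = Image.new("RGB", (1024, 1024))
    # one (x, y) point prompt on a single image; batch dims are added internally
    inputs = processor(image, input_points=[[[450, 600]]], return_tensors="pt")
    return inputs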
| 185 | 1 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : int = logging.get_logger(__name__)
_snake_case : List[Any] = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
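
# --- Added usage sketch (not part of the original file): build a small config
# and check that context_length falls back to prediction_length when omitted.
def _demo_autoformer_config():
    config = AutoformerConfig(prediction_length=24)
    assert config.context_length == 24
    return config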
| 363 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: maximum sum of a contiguous subarray of `arr`."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"""{max_subarray_sum(nums) = }""")
| 207 | 0 |
'''simple docstring'''
def solution(limit: int = 50_000_000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(F'''{solution() = }''')
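
# --- Added sanity check (values from the Project Euler 87 statement): below
# 50 there are exactly four prime power triples -- 28, 33, 47 and 49 -- so in
# CPython's ascending small-int set iteration order solution(50) returns 4.
def _demo_solution() -> None:
    assert solution(50) == 4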
| 250 |
"""simple docstring"""
def solution() -> int:
    """Product of the digits d_1, d_10, d_100, ..., d_1000000 of Champernowne's constant."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
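
# --- Added sanity check (a sketch): the digit string starts
# "123456789101112...", so d_1 = 1 and d_10 = 1; the full product for this
# Project Euler 40 task is widely quoted as 210.
def _demo_solution() -> None:
    assert solution() == 210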
if __name__ == "__main__":
print(solution()) | 320 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowerCAmelCase = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class A ( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= ZeroShotClassificationPipeline(
model=__A , tokenizer=__A , candidate_labels=['polics', 'health'] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def _A (self , lowerCAmelCase , lowerCAmelCase ):
__lowercase= classifier('Who are you voting for in 2020?' , candidate_labels='politics' )
self.assertEqual(__A , {'sequence': ANY(__A ), 'labels': [ANY(__A )], 'scores': [ANY(__A )]} )
# No kwarg
__lowercase= classifier('Who are you voting for in 2020?' , ['politics'] )
self.assertEqual(__A , {'sequence': ANY(__A ), 'labels': [ANY(__A )], 'scores': [ANY(__A )]} )
__lowercase= classifier('Who are you voting for in 2020?' , candidate_labels=['politics'] )
self.assertEqual(__A , {'sequence': ANY(__A ), 'labels': [ANY(__A )], 'scores': [ANY(__A )]} )
__lowercase= classifier('Who are you voting for in 2020?' , candidate_labels='politics, public health' )
self.assertEqual(
__A , {'sequence': ANY(__A ), 'labels': [ANY(__A ), ANY(__A )], 'scores': [ANY(__A ), ANY(__A )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
__lowercase= classifier('Who are you voting for in 2020?' , candidate_labels=['politics', 'public health'] )
self.assertEqual(
__A , {'sequence': ANY(__A ), 'labels': [ANY(__A ), ANY(__A )], 'scores': [ANY(__A ), ANY(__A )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
__lowercase= classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='This text is about {}' )
self.assertEqual(__A , {'sequence': ANY(__A ), 'labels': [ANY(__A )], 'scores': [ANY(__A )]} )
# https://github.com/huggingface/transformers/issues/13846
__lowercase= classifier(['I am happy'] , ['positive', 'negative'] )
self.assertEqual(
__A , [
{'sequence': ANY(__A ), 'labels': [ANY(__A ), ANY(__A )], 'scores': [ANY(__A ), ANY(__A )]}
for i in range(1 )
] , )
__lowercase= classifier(['I am happy', 'I am sad'] , ['positive', 'negative'] )
self.assertEqual(
__A , [
{'sequence': ANY(__A ), 'labels': [ANY(__A ), ANY(__A )], 'scores': [ANY(__A ), ANY(__A )]}
for i in range(2 )
] , )
with self.assertRaises(__A ):
classifier('' , candidate_labels='politics' )
with self.assertRaises(__A ):
classifier(__A , candidate_labels='politics' )
with self.assertRaises(__A ):
classifier('Who are you voting for in 2020?' , candidate_labels='' )
with self.assertRaises(__A ):
classifier('Who are you voting for in 2020?' , candidate_labels=__A )
with self.assertRaises(__A ):
classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='Not formatting template' , )
with self.assertRaises(__A ):
classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template=__A , )
self.run_entailment_id(__A )
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
def _A (self ):
__lowercase= pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'Who are you voting for in 2020?' * 1_0_0 , candidate_labels=['politics', 'public health', 'science'] )
@require_torch
def _A (self ):
__lowercase= pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
__lowercase= zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(__A ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_33, 0.3_33, 0.3_33],
} , )
@require_tf
def _A (self ):
__lowercase= pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='tf' , )
__lowercase= zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(__A ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_33, 0.3_33, 0.3_33],
} , )
@slow
@require_torch
def _A (self ):
__lowercase= pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='pt' )
__lowercase= zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(__A ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_76, 0.0_15, 0.0_09],
} , )
__lowercase= zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=__A , )
self.assertEqual(
nested_simplify(__A ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='tf')
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs),
            {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['politics', 'public health', 'science'],
                'scores': [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
            'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
            ' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
            ' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
            ' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
            ' machine translation tasks show these models to be superior in quality while being more parallelizable'
            ' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
            ' English-to-German translation task, improving over the existing best results, including ensembles by'
            ' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
            ' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
            ' fraction of the training costs of the best models from the literature. We show that the Transformer'
            ' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.',
            candidate_labels=['machine learning', 'statistics', 'translation', 'vision'],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                'sequence': (
                    'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
                    ' networks in an encoder-decoder configuration. The best performing models also connect the'
                    ' encoder and decoder through an attention mechanism. We propose a new simple network'
                    ' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
                    ' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
                    ' superior in quality while being more parallelizable and requiring significantly less time to'
                    ' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
                    ' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
                    ' English-to-French translation task, our model establishes a new single-model state-of-the-art'
                    ' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
                    ' costs of the best models from the literature. We show that the Transformer generalizes well to'
                    ' other tasks by applying it successfully to English constituency parsing both with large and'
                    ' limited training data.'
                ),
                'labels': ['translation', 'machine learning', 'vision', 'statistics'],
                'scores': [0.817, 0.713, 0.018, 0.018],
            },
        )
| 361 |
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


# the original flag name was lost in the source; this is the standard diffusers test boilerplate
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt='first prompt', image=prompt_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt='first prompt', image=prompt_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'cyberpunk 2077'
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type='numpy').images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 304 | 0 |
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, 'feature_size'))
        self.assertTrue(hasattr(feat_extract, 'sampling_rate'))
        self.assertTrue(hasattr(feat_extract, 'padding_value'))
    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type='np')

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type='pt')

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type='tf')

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding='longest')
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding='max_length', max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding='longest', return_tensors='np')
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding='max_length')[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding='max_length', max_length=pad_max_length, return_tensors='np')
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding='longest', pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding='max_length', pad_to_multiple_of=10, max_length=pad_max_length)
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features, padding='max_length', pad_to_multiple_of=10, max_length=pad_max_length, return_tensors='np', )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_7))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3)
        self.assertTrue(
            abs(
                np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
            < 1e-3)
        self.assertTrue(
            abs(
                np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
            < 1e-3)
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3)
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3)
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding='max_length', max_length=len(speech_inputs[0]), truncation=True)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding='max_length', max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features, padding='max_length', max_length=len(speech_inputs[0]), return_tensors='np', truncation=True, )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding='max_length', max_length=len(speech_inputs[0]), return_tensors='np')
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features, padding='max_length', max_length=len(speech_inputs[1]), truncation=True, return_tensors='np', )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding='max_length', max_length=len(speech_inputs[1]), truncation=True)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding='max_length', max_length=len(speech_inputs[1]), return_tensors='np')
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding='longest', truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding='longest', truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding='max_length', truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features, padding='max_length', max_length=len(speech_inputs[0]), pad_to_multiple_of=pad_to_multiple_of, truncation=True, )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features, padding='max_length', max_length=len(speech_inputs[0]), pad_to_multiple_of=pad_to_multiple_of, )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding='longest', return_tensors='np')[input_name]
        input_pt = feat_extract.pad(processed_features, padding='longest', return_tensors='pt')[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding='longest', return_tensors='np')[input_name]
        input_tf = feat_extract.pad(processed_features, padding='longest', return_tensors='tf')[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict['return_attention_mask'] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding='longest', return_tensors='np')
        self.assertIn('attention_mask', processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict['return_attention_mask'] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding='max_length', max_length=max_length, truncation=True, return_tensors='np')
        self.assertIn('attention_mask', processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length])
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs])
| 86 |
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Tuple = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def __lowerCAmelCase (_UpperCamelCase = 100 ):
__lowerCAmelCase : Optional[int] = 1
__lowerCAmelCase : Optional[Any] = 2
for i in range(2 , max_n + 1 ):
__lowerCAmelCase : Any = pre_numerator
__lowerCAmelCase : Union[str, Any] = 2 * i // 3 if i % 3 == 0 else 1
__lowerCAmelCase : int = cur_numerator
__lowerCAmelCase : Dict = e_cont * pre_numerator + temp
return sum_digits(_UpperCamelCase )
if __name__ == "__main__":
print(f'{solution() = }') | 86 | 1 |
# Lint as: python3
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r'([A-Z]+)([A-Z][a-z])')
_lowercase_uppercase_re = re.compile(r'([a-z\d])([A-Z])')

_single_underscore_re = re.compile(r'(?<!_)_(?!_)')
_multiple_underscores_re = re.compile(r'(_{2,})')

_split_re = r'^\w+(\.\w+)*$'

INVALID_WINDOWS_CHARACTERS_IN_PATH = r'<>:/\|?*'


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r'\1_\2', name)
    name = _lowercase_uppercase_re.sub(r'\1_\2', name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return ''.join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != '')


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(F"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(F"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(F"Split name should match '{_split_re}' but got '{split}'.")
    return F"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += F".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return F"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + F".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F".{filetype_suffix}"
        return [filename]
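

# Minimal usage sketch (not part of the original module); it only exercises the
# helpers defined above, and the expected outputs follow from the regexes here.
if __name__ == "__main__":
    print(camelcase_to_snakecase("SquadV2Dataset"))  # squad_v2_dataset
    print(snakecase_to_camelcase("squad_v2_dataset"))  # SquadV2Dataset
    print(filename_prefix_for_split("SquadV2Dataset", "train"))  # squad_v2_dataset-train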
| 145 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roc_bert'] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 145 | 1 |
"""simple docstring"""
from __future__ import annotations
def a__ ( __SCREAMING_SNAKE_CASE ) -> bool:
return len(set(__SCREAMING_SNAKE_CASE ) ) == len(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 217 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100, )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        return CLIPTextModel(config)
def lowercase_ ( self : List[str])-> Dict:
'''simple docstring'''
__lowerCAmelCase: Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase: int = self.dummy_cond_unet_upscale
__lowerCAmelCase: int = DDPMScheduler()
__lowerCAmelCase: List[str] = DDIMScheduler(prediction_type="v_prediction")
__lowerCAmelCase: Tuple = self.dummy_vae
__lowerCAmelCase: Optional[Any] = self.dummy_text_encoder
__lowerCAmelCase: Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
__lowerCAmelCase: Tuple = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
        __lowerCAmelCase: List[Any] = Image.fromarray(np.uint8(UpperCamelCase__)).convert("RGB").resize((64, 64))
# make sure here that pndm scheduler skips prk
__lowerCAmelCase: Optional[int] = StableDiffusionUpscalePipeline(
unet=UpperCamelCase__ , low_res_scheduler=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , max_noise_level=3_5_0 , )
__lowerCAmelCase: Tuple = sd_pipe.to(UpperCamelCase__)
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__)
__lowerCAmelCase: Any = "A painting of a squirrel eating a burger"
__lowerCAmelCase: str = torch.Generator(device=UpperCamelCase__).manual_seed(0)
__lowerCAmelCase: Optional[int] = sd_pipe(
[prompt] , image=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
__lowerCAmelCase: List[str] = output.images
__lowerCAmelCase: Union[str, Any] = torch.Generator(device=UpperCamelCase__).manual_seed(0)
__lowerCAmelCase: List[str] = sd_pipe(
[prompt] , image=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , return_dict=UpperCamelCase__ , )[0]
__lowerCAmelCase: int = image[0, -3:, -3:, -1]
__lowerCAmelCase: Dict = image_from_tuple[0, -3:, -3:, -1]
__lowerCAmelCase: Dict = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
__lowerCAmelCase: List[Any] = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def lowercase_ ( self : List[str])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase: Dict = self.dummy_cond_unet_upscale
__lowerCAmelCase: List[str] = DDPMScheduler()
__lowerCAmelCase: Union[str, Any] = DDIMScheduler(prediction_type="v_prediction")
__lowerCAmelCase: Optional[int] = self.dummy_vae
__lowerCAmelCase: List[Any] = self.dummy_text_encoder
__lowerCAmelCase: Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
__lowerCAmelCase: List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
        __lowerCAmelCase: str = Image.fromarray(np.uint8(UpperCamelCase__)).convert("RGB").resize((64, 64))
# make sure here that pndm scheduler skips prk
__lowerCAmelCase: Optional[int] = StableDiffusionUpscalePipeline(
unet=UpperCamelCase__ , low_res_scheduler=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , max_noise_level=3_5_0 , )
__lowerCAmelCase: Optional[int] = sd_pipe.to(UpperCamelCase__)
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__)
__lowerCAmelCase: List[str] = "A painting of a squirrel eating a burger"
__lowerCAmelCase: List[Any] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
__lowerCAmelCase: List[Any] = output.images
assert image.shape[0] == 2
__lowerCAmelCase: Dict = torch.Generator(device=UpperCamelCase__).manual_seed(0)
__lowerCAmelCase: Optional[Any] = sd_pipe(
[prompt] , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
__lowerCAmelCase: List[Any] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU")
def lowercase_ ( self : Tuple)-> Any:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = self.dummy_cond_unet_upscale
__lowerCAmelCase: int = DDPMScheduler()
__lowerCAmelCase: int = DDIMScheduler(prediction_type="v_prediction")
__lowerCAmelCase: Dict = self.dummy_vae
__lowerCAmelCase: int = self.dummy_text_encoder
__lowerCAmelCase: List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
__lowerCAmelCase: List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1)[0]
        __lowerCAmelCase: Optional[int] = Image.fromarray(np.uint8(UpperCamelCase__)).convert("RGB").resize((64, 64))
# put models in fp16, except vae as it overflows in fp16
__lowerCAmelCase: List[Any] = unet.half()
__lowerCAmelCase: List[str] = text_encoder.half()
# make sure here that pndm scheduler skips prk
__lowerCAmelCase: List[Any] = StableDiffusionUpscalePipeline(
unet=UpperCamelCase__ , low_res_scheduler=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , max_noise_level=3_5_0 , )
__lowerCAmelCase: str = sd_pipe.to(UpperCamelCase__)
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = "A painting of a squirrel eating a burger"
__lowerCAmelCase: str = torch.manual_seed(0)
__lowerCAmelCase: Dict = sd_pipe(
[prompt] , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="np" , ).images
__lowerCAmelCase: Optional[Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def lowercase_ ( self : Tuple)-> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : List[Any])-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png")
__lowerCAmelCase: Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy")
__lowerCAmelCase: str = "stabilityai/stable-diffusion-x4-upscaler"
__lowerCAmelCase: Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(UpperCamelCase__)
pipe.to(UpperCamelCase__)
pipe.set_progress_bar_config(disable=UpperCamelCase__)
pipe.enable_attention_slicing()
__lowerCAmelCase: Tuple = "a cat sitting on a park bench"
__lowerCAmelCase: int = torch.manual_seed(0)
__lowerCAmelCase: List[Any] = pipe(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , output_type="np" , )
__lowerCAmelCase: Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 1e-3
def lowercase_ ( self : Optional[int])-> Any:
'''simple docstring'''
__lowerCAmelCase: Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png")
__lowerCAmelCase: Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy")
__lowerCAmelCase: Optional[Any] = "stabilityai/stable-diffusion-x4-upscaler"
__lowerCAmelCase: Tuple = StableDiffusionUpscalePipeline.from_pretrained(
            UpperCamelCase__ , torch_dtype=torch.float16 , )
pipe.to(UpperCamelCase__)
pipe.set_progress_bar_config(disable=UpperCamelCase__)
pipe.enable_attention_slicing()
__lowerCAmelCase: str = "a cat sitting on a park bench"
__lowerCAmelCase: List[str] = torch.manual_seed(0)
__lowerCAmelCase: Optional[Any] = pipe(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , output_type="np" , )
__lowerCAmelCase: Union[str, Any] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 5e-1
def lowercase_ ( self : Optional[int])-> Dict:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCAmelCase: Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png")
__lowerCAmelCase: Union[str, Any] = "stabilityai/stable-diffusion-x4-upscaler"
__lowerCAmelCase: Any = StableDiffusionUpscalePipeline.from_pretrained(
            UpperCamelCase__ , torch_dtype=torch.float16 , )
pipe.to(UpperCamelCase__)
pipe.set_progress_bar_config(disable=UpperCamelCase__)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
__lowerCAmelCase: int = "a cat sitting on a park bench"
__lowerCAmelCase: Dict = torch.manual_seed(0)
__lowerCAmelCase: Dict = pipe(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=5 , output_type="np" , )
__lowerCAmelCase: Optional[int] = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 1_0**9
| 217 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img() -> torch.Tensor:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
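

# The conversion below proceeds in four steps: load the original checkpoint,
# rename its keys to the Hugging Face layout (rename_key / convert_state_dict,
# which also splits the fused qkv projection into separate query/key/value
# tensors), verify a few logits and predicted boxes against reference slices,
# and finally save (and optionally push) the model with its image processor.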
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False) -> None:
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]])
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]])
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]])
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]])
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]])
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]])
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]])
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]])
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]])
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]])
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',"
" \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
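
    # Example invocation (hypothetical paths, shown for illustration only):
    #   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
    #       --checkpoint_path ./yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small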
| 368 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
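

# The gzipped IDX files fetched below begin with a big-endian uint32 magic
# number (2051 for image files, 2049 for label files), followed by big-endian
# uint32 dimension sizes and the raw uint8 payload; _read32 reads one such
# big-endian uint32 at a time.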
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None, "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.", )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None, ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
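

# Minimal usage sketch (assumed data, not part of the original module):
#   ds = _DataSet(images, labels, dtype=dtypes.float32, reshape=True)
#   batch_xs, batch_ys = ds.next_batch(100)
# next_batch reshuffles once per epoch and stitches an epoch boundary together
# by concatenating the tail of the old epoch with the head of the new one.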
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
    with gfile.GFile(filepath) as f:
        size = f.size()
    print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(
    None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, validation_size=5000, seed=None, source_url=DEFAULT_SOURCE_URL, ):
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}.")
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
| 339 | 0 |
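The row above is an obfuscated copy of the classic TensorFlow-tutorial MNIST `DataSet`, whose interesting part is the epoch-aware `next_batch`: shuffle once per epoch, and when a batch straddles an epoch boundary, splice the leftover tail onto the head of a freshly shuffled epoch. A de-obfuscated sketch of that logic (names and the generator interface are my own, assuming `batch_size <= len(images)`):

```python
import numpy as np

def epoch_batches(images, labels, batch_size, shuffle=True, seed=0):
    """Yield mini-batches forever, reshuffling at each epoch boundary and
    splicing the tail of one epoch onto the head of the next, as the
    next_batch method above does."""
    rng = np.random.default_rng(seed)
    num = images.shape[0]
    order = rng.permutation(num) if shuffle else np.arange(num)
    start = 0
    while True:
        if start + batch_size > num:
            tail = order[start:]                    # leftovers of this epoch
            order = rng.permutation(num) if shuffle else np.arange(num)
            head = order[: batch_size - len(tail)]  # top up from the next epoch
            idx = np.concatenate((tail, head))
            start = len(head)
        else:
            idx = order[start : start + batch_size]
            start += batch_size
        yield images[idx], labels[idx]
```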
from __future__ import annotations
def UpperCAmelCase_ ( __snake_case ) -> list[int]:
"""simple docstring"""
if len(__snake_case ) == 0:
return array
_lowercase , _lowercase =min(__snake_case ), max(__snake_case )
# Compute the variables
_lowercase =_max - _min + 1
_lowercase , _lowercase =[0] * holes_range, [0] * holes_range
# Make the sorting.
for i in array:
_lowercase =i - _min
_lowercase =i
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
_lowercase =0
for i in range(__snake_case ):
while holes_repeat[i] > 0:
_lowercase =holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = input('''Enter numbers separated by comma:\n''')
UpperCAmelCase__ = [int(x) for x in user_input.split(''',''')]
print(pigeon_sort(unsorted))
| 5 |
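De-obfuscated, the `pigeon_sort` row above is pigeonhole sort: count each value into a hole indexed by its offset from the minimum, then write the values back in order. A minimal runnable sketch (naming is mine; the original also keeps a parallel array of hole values, which plain counting makes unnecessary for integers):

```python
def pigeonhole_sort(array: list[int]) -> list[int]:
    """In-place pigeonhole sort for integers: O(n + k) time and O(k) extra
    space, where k = max(array) - min(array) + 1."""
    if not array:
        return array
    lo, hi = min(array), max(array)
    counts = [0] * (hi - lo + 1)
    for value in array:                      # first pass: count into holes
        counts[value - lo] += 1
    out = 0
    for offset, count in enumerate(counts):  # second pass: write back in order
        for _ in range(count):
            array[out] = offset + lo
            out += 1
    return array

assert pigeonhole_sort([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]
```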
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowerCamelCase__ ( nn.Module):
def __init__(self , UpperCAmelCase = 1_6 , UpperCAmelCase = 8_8 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = 3_2 , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = "geglu" , UpperCAmelCase = None , ) -> Any:
super().__init__()
_lowercase =nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=UpperCAmelCase , attention_head_dim=UpperCAmelCase , in_channels=UpperCAmelCase , num_layers=UpperCAmelCase , dropout=UpperCAmelCase , norm_num_groups=UpperCAmelCase , cross_attention_dim=UpperCAmelCase , attention_bias=UpperCAmelCase , sample_size=UpperCAmelCase , num_vector_embeds=UpperCAmelCase , activation_fn=UpperCAmelCase , num_embeds_ada_norm=UpperCAmelCase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_lowercase =0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_lowercase =[7_7, 2_5_7]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_lowercase =[1, 0]
def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase = True , ) -> str:
_lowercase =hidden_states
_lowercase =[]
_lowercase =0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_lowercase =encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_lowercase =self.transformer_index_for_condition[i]
_lowercase =self.transformers[transformer_index](
UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , timestep=UpperCAmelCase , cross_attention_kwargs=UpperCAmelCase , return_dict=UpperCAmelCase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_lowercase =encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_lowercase =output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=UpperCAmelCase )
| 5 | 1 |
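In the dual-transformer forward pass above, each of the two transformers sees its own slice of the condition tokens, its contribution is taken as a residual (output minus input), and the two residuals are blended with a fixed `mix_ratio` before being added back onto the input. Stripped of the model machinery, the blend is just (a sketch; the tensors stand in for transformer outputs):

```python
import torch

def blend_residuals(x, enc0, enc1, mix_ratio=0.5):
    """Residual mix of two encoder outputs, as in the forward pass above."""
    res0, res1 = enc0 - x, enc1 - x          # per-transformer residuals
    return x + mix_ratio * res0 + (1.0 - mix_ratio) * res1

x = torch.randn(1, 4, 32)
out = blend_residuals(x, x + 0.1, x - 0.1)   # symmetric residuals cancel at 0.5
assert torch.allclose(out, x, atol=1e-6)
```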
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowerCamelCase__( UpperCamelCase__):
def __init__( self: List[str] , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = dataset
__lowerCamelCase = process
__lowerCamelCase = params
def __len__( self: List[str] ):
return len(self.dataset )
def __getitem__( self: Union[str, Any] , UpperCamelCase_: List[str] ):
__lowerCamelCase = self.dataset[i]
__lowerCamelCase = self.process(__a , **self.params )
return processed
class lowerCamelCase__( UpperCamelCase__):
def __init__( self: int , UpperCamelCase_: List[Any] , UpperCamelCase_: int , UpperCamelCase_: str , UpperCamelCase_: Optional[int]=None ):
__lowerCamelCase = loader
__lowerCamelCase = infer
__lowerCamelCase = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
__lowerCamelCase = None
__lowerCamelCase = loader_batch_size
# Internal bookkeeping
__lowerCamelCase = None
__lowerCamelCase = None
def __len__( self: Any ):
return len(self.loader )
def __iter__( self: Dict ):
__lowerCamelCase = iter(self.loader )
return self
def lowerCAmelCase__ ( self: Tuple ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
__lowerCamelCase = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
__lowerCamelCase = {}
for k, element in self._loader_batch_data.items():
if isinstance(__a , __a ):
# Convert ModelOutput to tuple first
__lowerCamelCase = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
__lowerCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
__lowerCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(__a , __a ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
__lowerCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
__lowerCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
__lowerCamelCase = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
__lowerCamelCase = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
__lowerCamelCase = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
__lowerCamelCase = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
__lowerCamelCase = self._loader_batch_data.__class__(__a )
self._loader_batch_index += 1
return result
def lowerCAmelCase__ ( self: Optional[Any] ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
__lowerCamelCase = next(self.iterator )
__lowerCamelCase = self.infer(__a , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(__a , torch.Tensor ):
__lowerCamelCase = processed
else:
__lowerCamelCase = list(processed.keys() )[0]
__lowerCamelCase = processed[key]
if isinstance(__a , __a ):
__lowerCamelCase = len(__a )
else:
__lowerCamelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
__lowerCamelCase = observed_batch_size
# Setting internal index to unwrap the batch
__lowerCamelCase = processed
__lowerCamelCase = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class lowerCamelCase__( UpperCamelCase__):
def __init__( self: List[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict=None ):
super().__init__(__a , __a , __a )
def __iter__( self: Tuple ):
__lowerCamelCase = iter(self.loader )
__lowerCamelCase = None
return self
def lowerCAmelCase__ ( self: str ):
if self.subiterator is None:
__lowerCamelCase = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
__lowerCamelCase = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start looking at the next item
# ChunkIterator will keep feeding until ALL elements of the iterator
# have created their subiterator and have been fully iterated over.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
__lowerCamelCase = self.infer(next(self.iterator ) , **self.params )
__lowerCamelCase = next(self.subiterator )
return processed
class lowerCamelCase__( UpperCamelCase__):
def __iter__( self: str ):
__lowerCamelCase = iter(self.loader )
return self
def lowerCAmelCase__ ( self: Optional[int] ):
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator`, so we
# need to keep track of how to regroup here at the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
# hits an `is_last` and then just passes it on to the caller.
__lowerCamelCase = False
__lowerCamelCase = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
__lowerCamelCase = self.loader_batch_item()
__lowerCamelCase = item.pop('is_last' )
accumulator.append(__a )
if is_last:
return accumulator
while not is_last:
__lowerCamelCase = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(__a , torch.Tensor ):
__lowerCamelCase = processed
else:
__lowerCamelCase = list(processed.keys() )[0]
__lowerCamelCase = processed[key]
if isinstance(__a , __a ):
__lowerCamelCase = len(__a )
else:
__lowerCamelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
__lowerCamelCase = observed_batch_size
__lowerCamelCase = processed
__lowerCamelCase = 0
while self._loader_batch_index < self.loader_batch_size:
__lowerCamelCase = self.loader_batch_item()
__lowerCamelCase = item.pop('is_last' )
accumulator.append(__a )
if is_last:
return accumulator
else:
__lowerCamelCase = processed
__lowerCamelCase = item.pop('is_last' )
accumulator.append(__a )
return accumulator
class lowerCamelCase__( UpperCamelCase__):
def __init__( self: str , UpperCamelCase_: Dataset , UpperCamelCase_: str ):
__lowerCamelCase = dataset
__lowerCamelCase = key
def __len__( self: List[str] ):
return len(self.dataset )
def __getitem__( self: Union[str, Any] , UpperCamelCase_: int ):
return self.dataset[i][self.key]
class lowerCamelCase__( UpperCamelCase__):
def __init__( self: int , UpperCamelCase_: Dataset , UpperCamelCase_: str , UpperCamelCase_: str ):
__lowerCamelCase = dataset
__lowerCamelCase = keya
__lowerCamelCase = keya
def __len__( self: Union[str, Any] ):
return len(self.dataset )
def __getitem__( self: Optional[int] , UpperCamelCase_: Optional[int] ):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 367 |
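The `loader_batch_item` bookkeeping above exists so the pipeline can hand out one example at a time from a batched model output while preserving a leading batch dimension of 1. A toy version for a plain dict (the real class additionally handles `ModelOutput` objects, nested tuples of tensors, and `None` fields):

```python
import numpy as np
import torch

def unroll_batch(batch: dict, index: int) -> dict:
    """Slice element `index` out of every field, reshaping arrays to
    batch_size=1 so downstream code still sees a batch dimension."""
    item = {}
    for key, value in batch.items():
        if isinstance(value, torch.Tensor):
            item[key] = value[index].unsqueeze(0)
        elif isinstance(value, np.ndarray):
            item[key] = np.expand_dims(value[index], 0)
        else:  # lists and other per-example containers
            item[key] = value[index]
    return item

batch = {"logits": torch.randn(4, 3), "ids": [[1], [2], [3], [4]]}
items = [unroll_batch(batch, i) for i in range(4)]
assert items[0]["logits"].shape == (1, 3) and items[0]["ids"] == [1]
```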
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowerCamelCase__ ( A__ : Dict , A__ : Optional[int]=False ):
'''simple docstring'''
try:
__lowerCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__lowerCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
__lowerCamelCase = strtobool(A__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
UpperCAmelCase_ = parse_flag_from_env('RUN_SLOW', default=False)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skip("""Test was skipped""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , """test is slow""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(A__ )
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(A__ )
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(A__ )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(A__ )
def lowerCamelCase__ ( A__ : Tuple=None , A__ : Optional[Any]=None ):
'''simple docstring'''
if test_case is None:
return partial(A__ , version=A__ )
return unittest.skipUnless(is_torch_version(""">=""" , A__ ) , f'test requires torch version >= {version}' )(A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(A__ )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(A__ )
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(A__ )
UpperCAmelCase_ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(A__ )
class lowerCamelCase__( unittest.TestCase):
UpperCAmelCase__ : List[Any] = True
@classmethod
def lowerCAmelCase__ ( cls: int ):
__lowerCamelCase = tempfile.mkdtemp()
@classmethod
def lowerCAmelCase__ ( cls: Any ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowerCAmelCase__ ( self: Any ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("""**/*""" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase_ )
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: int ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[mock.Mock, List[mock.Mock]] ):
__lowerCamelCase = mocks if isinstance(UpperCamelCase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase = AcceleratorState()
__lowerCamelCase = tensor[None].clone().to(state.device )
__lowerCamelCase = gather(A__ ).cpu()
__lowerCamelCase = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , A__ ):
return False
return True
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: Any ):
__lowerCamelCase = returncode
__lowerCamelCase = stdout
__lowerCamelCase = stderr
async def lowerCamelCase__ ( A__ : int , A__ : Any ):
'''simple docstring'''
while True:
__lowerCamelCase = await stream.readline()
if line:
callback(A__ )
else:
break
async def lowerCamelCase__ ( A__ : Dict , A__ : List[str]=None , A__ : Any=None , A__ : Optional[Any]=None , A__ : Tuple=False , A__ : List[Any]=False ):
'''simple docstring'''
if echo:
print("""\nRunning: """ , """ """.join(A__ ) )
__lowerCamelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=A__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=A__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, we will need to switch to the following code. The problem is that
# no data will be seen until it's done and, if it hangs, there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__lowerCamelCase = []
__lowerCamelCase = []
def tee(A__ : int , A__ : Any , A__ : Optional[Any] , A__ : int="" ):
__lowerCamelCase = line.decode("""utf-8""" ).rstrip()
sink.append(A__ )
if not quiet:
print(A__ , A__ , file=A__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda A__ : tee(A__ , A__ , sys.stdout , label="""stdout:""" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda A__ : tee(A__ , A__ , sys.stderr , label="""stderr:""" ) ) ),
] , timeout=A__ , )
return _RunOutput(await p.wait() , A__ , A__ )
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Any=None , A__ : Union[str, Any]=None , A__ : Dict=180 , A__ : str=False , A__ : List[Any]=True ):
'''simple docstring'''
__lowerCamelCase = asyncio.get_event_loop()
__lowerCamelCase = loop.run_until_complete(
_stream_subprocess(A__ , env=A__ , stdin=A__ , timeout=A__ , quiet=A__ , echo=A__ ) )
__lowerCamelCase = """ """.join(A__ )
if result.returncode > 0:
__lowerCamelCase = """\n""".join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
return result
class lowerCamelCase__( __lowerCamelCase):
pass
def lowerCamelCase__ ( A__ : List[str] , A__ : Union[str, Any]=False ):
'''simple docstring'''
try:
__lowerCamelCase = subprocess.check_output(A__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(A__ , """decode""" ):
__lowerCamelCase = output.decode("""utf-8""" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'Command `{" ".join(A__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 29 | 0 |
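Every decorator in the file above follows one pattern: read a yes/no flag (from the environment or a capability probe) and wrap `unittest.skipUnless` in a named decorator. A self-contained sketch of the pattern (the accepted strings mirror `distutils.util.strtobool`, which the original imports):

```python
import os
import unittest

def parse_flag_from_env(key: str, default: bool = False) -> bool:
    """Interpret an environment variable as a boolean test flag."""
    value = os.environ.get(key)
    if value is None:
        return default
    if value.lower() in ("y", "yes", "t", "true", "on", "1"):
        return True
    if value.lower() in ("n", "no", "f", "false", "off", "0"):
        return False
    raise ValueError(f"If set, {key} must be yes or no.")

_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)

def slow(test_case):
    """Skip the decorated test unless RUN_SLOW is set to a truthy value."""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
```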
from ....utils import logging
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any=None , UpperCamelCase__ : Union[str, Any]=2048 ) -> Dict:
"""simple docstring"""
__magic_name__ = config.__dict__
__magic_name__ = modal_hidden_size
if num_labels:
__magic_name__ = num_labels
| 88 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = 42
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : str=3 , UpperCamelCase__ : List[Any]=("DownEncoderBlock2D",) , UpperCamelCase__ : Optional[Any]=(64,) , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Union[str, Any]=32 , UpperCamelCase__ : Optional[Any]="silu" , UpperCamelCase__ : List[str]=True , ) -> str:
"""simple docstring"""
super().__init__()
__magic_name__ = layers_per_block
__magic_name__ = torch.nn.Convad(
UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
__magic_name__ = None
__magic_name__ = nn.ModuleList([] )
# down
__magic_name__ = block_out_channels[0]
for i, down_block_type in enumerate(UpperCamelCase__ ):
__magic_name__ = output_channel
__magic_name__ = block_out_channels[i]
__magic_name__ = i == len(UpperCamelCase__ ) - 1
__magic_name__ = get_down_block(
UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
self.down_blocks.append(UpperCamelCase__ )
# mid
__magic_name__ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# out
__magic_name__ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1E-6 )
__magic_name__ = nn.SiLU()
__magic_name__ = 2 * out_channels if double_z else out_channels
__magic_name__ = nn.Convad(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
__magic_name__ = False
def _lowercase ( self : List[str] , UpperCamelCase__ : Optional[Any] ) -> int:
"""simple docstring"""
__magic_name__ = x
__magic_name__ = self.conv_in(UpperCamelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ : int ):
def custom_forward(*UpperCamelCase__ : str ):
return module(*UpperCamelCase__ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
for down_block in self.down_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
else:
# down
for down_block in self.down_blocks:
__magic_name__ = down_block(UpperCamelCase__ )
# middle
__magic_name__ = self.mid_block(UpperCamelCase__ )
# post-process
__magic_name__ = self.conv_norm_out(UpperCamelCase__ )
__magic_name__ = self.conv_act(UpperCamelCase__ )
__magic_name__ = self.conv_out(UpperCamelCase__ )
return sample
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : int=3 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : List[Any]=("UpDecoderBlock2D",) , UpperCamelCase__ : List[Any]=(64,) , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : int=32 , UpperCamelCase__ : Optional[int]="silu" , UpperCamelCase__ : Tuple="group" , ) -> Dict:
"""simple docstring"""
super().__init__()
__magic_name__ = layers_per_block
__magic_name__ = nn.Convad(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
__magic_name__ = None
__magic_name__ = nn.ModuleList([] )
__magic_name__ = in_channels if norm_type == """spatial""" else None
# mid
__magic_name__ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
__magic_name__ = list(reversed(UpperCamelCase__ ) )
__magic_name__ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
__magic_name__ = output_channel
__magic_name__ = reversed_block_out_channels[i]
__magic_name__ = i == len(UpperCamelCase__ ) - 1
__magic_name__ = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
__magic_name__ = output_channel
# out
if norm_type == "spatial":
__magic_name__ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
__magic_name__ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1E-6 )
__magic_name__ = nn.SiLU()
__magic_name__ = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
__magic_name__ = False
def _lowercase ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=None ) -> Tuple:
"""simple docstring"""
__magic_name__ = z
__magic_name__ = self.conv_in(UpperCamelCase__ )
__magic_name__ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ : Optional[int] ):
def custom_forward(*UpperCamelCase__ : int ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
__magic_name__ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
__magic_name__ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
__magic_name__ = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
__magic_name__ = self.conv_norm_out(UpperCamelCase__ )
else:
__magic_name__ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self.conv_act(UpperCamelCase__ )
__magic_name__ = self.conv_out(UpperCamelCase__ )
return sample
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Dict="random" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict=True ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__magic_name__ = n_e
__magic_name__ = vq_embed_dim
__magic_name__ = beta
__magic_name__ = legacy
__magic_name__ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
__magic_name__ = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
__magic_name__ = self.used.shape[0]
__magic_name__ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__magic_name__ = self.re_embed
__magic_name__ = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
__magic_name__ = n_e
__magic_name__ = sane_index_shape
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Dict ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = inds.shape
assert len(UpperCamelCase__ ) > 1
__magic_name__ = inds.reshape(ishape[0] , -1 )
__magic_name__ = self.used.to(UpperCamelCase__ )
__magic_name__ = (inds[:, :, None] == used[None, None, ...]).long()
__magic_name__ = match.argmax(-1 )
__magic_name__ = match.sum(2 ) < 1
if self.unknown_index == "random":
__magic_name__ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
__magic_name__ = self.unknown_index
return new.reshape(UpperCamelCase__ )
def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str ) -> Tuple:
"""simple docstring"""
__magic_name__ = inds.shape
assert len(UpperCamelCase__ ) > 1
__magic_name__ = inds.reshape(ishape[0] , -1 )
__magic_name__ = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
__magic_name__ = 0 # simply set to zero
__magic_name__ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def _lowercase ( self : List[str] , UpperCamelCase__ : List[str] ) -> List[str]:
"""simple docstring"""
__magic_name__ = z.permute(0 , 2 , 3 , 1 ).contiguous()
__magic_name__ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__magic_name__ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
__magic_name__ = self.embedding(UpperCamelCase__ ).view(z.shape )
__magic_name__ = None
__magic_name__ = None
# compute loss for embedding
if not self.legacy:
__magic_name__ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__magic_name__ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__magic_name__ = z + (z_q - z).detach()
# reshape back to match original input shape
__magic_name__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
__magic_name__ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
__magic_name__ = self.remap_to_used(UpperCamelCase__ )
__magic_name__ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
__magic_name__ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] ) -> int:
"""simple docstring"""
if self.remap is not None:
__magic_name__ = indices.reshape(shape[0] , -1 ) # add batch axis
__magic_name__ = self.unmap_to_all(UpperCamelCase__ )
__magic_name__ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__magic_name__ = self.embedding(UpperCamelCase__ )
if shape is not None:
__magic_name__ = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
__magic_name__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=False ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = parameters
__magic_name__ , __magic_name__ = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
__magic_name__ = torch.clamp(self.logvar , -30.0 , 20.0 )
__magic_name__ = deterministic
__magic_name__ = torch.exp(0.5 * self.logvar )
__magic_name__ = torch.exp(self.logvar )
if self.deterministic:
__magic_name__ = __magic_name__ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def _lowercase ( self : Tuple , UpperCamelCase__ : Optional[torch.Generator] = None ) -> torch.FloatTensor:
"""simple docstring"""
__magic_name__ = randn_tensor(
self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
__magic_name__ = self.mean + self.std * sample
return x
def _lowercase ( self : Dict , UpperCamelCase__ : Optional[int]=None ) -> Any:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict=[1, 2, 3] ) -> Optional[int]:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
__magic_name__ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
def _lowercase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return self.mean
| 88 | 1 |
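Two ideas in the `VectorQuantizer` above are worth isolating. First, nearest-codebook assignment: the comment's identity (z − e)² = z² + e² − 2·e·z is why a pairwise-distance `torch.cdist` followed by `argmin` suffices. Second, the straight-through estimator `z + (z_q − z).detach()`: the forward pass emits the quantized vector while gradients bypass the non-differentiable argmin and flow to the encoder as if quantization were the identity. A minimal sketch (codebook size, dimensions, and the 0.25 commitment weight are arbitrary; the loss follows the snippet's legacy branch):

```python
import torch

def quantize(z: torch.Tensor, codebook: torch.Tensor, beta: float = 0.25):
    """z: (n, d) encoder outputs; codebook: (k, d) embedding vectors."""
    indices = torch.cdist(z, codebook).argmin(dim=1)  # nearest entry per row
    z_q = codebook[indices]
    # codebook loss + beta-weighted commitment loss (legacy form above)
    loss = torch.mean((z_q.detach() - z) ** 2) + beta * torch.mean((z_q - z.detach()) ** 2)
    z_q = z + (z_q - z).detach()  # straight-through: backward sees the identity
    return z_q, loss, indices

z = torch.randn(16, 8, requires_grad=True)
codebook = torch.randn(512, 8)
z_q, loss, idx = quantize(z, codebook)
loss.backward()  # gradients reach z despite the argmin
```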
'''simple docstring'''
import random
from typing import Any
def __lowerCamelCase ( _lowercase ) -> list[Any]:
for _ in range(len(_lowercase ) ):
UpperCAmelCase : Union[str, Any] = random.randint(0 , len(_lowercase ) - 1 )
UpperCAmelCase : int = random.randint(0 , len(_lowercase ) - 1 )
UpperCAmelCase , UpperCAmelCase : List[str] = data[b], data[a]
return data
if __name__ == "__main__":
a : Optional[int] = [0, 1, 2, 3, 4, 5, 6, 7]
a : Dict = ["""python""", """says""", """hello""", """!"""]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 338 |
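One caveat on the row above: despite the printed label, swapping two independently random positions on each pass is not the Fisher–Yates algorithm and does not produce a uniform permutation. The textbook in-place Fisher–Yates walks the array once from the end:

```python
import random
from typing import Any

def fisher_yates_shuffle(data: list[Any]) -> list[Any]:
    """Uniform in-place shuffle: at each step, swap position i with a
    uniformly chosen position j <= i."""
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # randint is inclusive on both ends
        data[i], data[j] = data[j], data[i]
    return data

print(fisher_yates_shuffle(list(range(8))))
```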
'''simple docstring'''
def __lowerCamelCase ( _lowercase ) -> int:
UpperCAmelCase : List[str] = 0
while num > 0:
digit_sum += num % 1_0
num //= 1_0
return digit_sum
def __lowerCamelCase ( _lowercase = 1_0_0 ) -> int:
UpperCAmelCase : int = 1
UpperCAmelCase : str = 2
for i in range(2 , max_n + 1 ):
UpperCAmelCase : Tuple = pre_numerator
UpperCAmelCase : Optional[int] = 2 * i // 3 if i % 3 == 0 else 1
UpperCAmelCase : Union[str, Any] = cur_numerator
UpperCAmelCase : Optional[int] = e_cont * pre_numerator + temp
return sum_digits(_lowercase )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 338 | 1 |
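The second row is Project Euler 65: the digit sum of the numerator of the 100th convergent of e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]. Convergent numerators obey h_i = a_i·h_{i−1} + h_{i−2}, which is exactly the recurrence the `pre_numerator`/`cur_numerator` shuffle above carries. A cleaner rendering of the same idea (the i-th partial quotient is 2i/3 when 3 divides i, else 1; the 10th numerator is 1457 with digit sum 17, and max_n = 100 gives 272):

```python
def digit_sum(n: int) -> int:
    return sum(int(d) for d in str(n))

def e_convergent_numerator(max_n: int = 100) -> int:
    """Numerator of the max_n-th convergent of e, via h_i = a_i*h_{i-1} + h_{i-2}."""
    h_prev, h_cur = 1, 2  # h_0 = 2 is the integer part of e
    for i in range(2, max_n + 1):
        a_i = 2 * i // 3 if i % 3 == 0 else 1  # partial quotients 1, 2, 1, 1, 4, ...
        h_prev, h_cur = h_cur, a_i * h_cur + h_prev
    return h_cur

assert digit_sum(e_convergent_numerator(10)) == 17  # numerator 1457
print(digit_sum(e_convergent_numerator(100)))       # 272
```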
"""simple docstring"""
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""nielsr/canine-s""": 2048,
}
# Unicode defines 1,114,112 total "codepoints"
a_ = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
a_ = 0
a_ = 0Xe000
a_ = 0Xe001
a_ = 0Xe002
a_ = 0Xe003
a_ = 0Xe004
# Maps special codepoints to human-readable names.
a_ = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
a_ = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __lowerCamelCase=chr(__lowerCamelCase ) , __lowerCamelCase=chr(__lowerCamelCase ) , __lowerCamelCase=chr(__lowerCamelCase ) , __lowerCamelCase=chr(__lowerCamelCase ) , __lowerCamelCase=chr(__lowerCamelCase ) , __lowerCamelCase=chr(__lowerCamelCase ) , __lowerCamelCase=False , __lowerCamelCase=2048 , **__lowerCamelCase , ):
'''simple docstring'''
__A : Optional[int] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
__A : str = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
__A : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
__A : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
__A : Union[str, Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
# Mask token behaves like a normal word, i.e. includes the space before it
__A : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , model_max_length=__lowerCamelCase , **__lowerCamelCase , )
# Creates a mapping for looking up the IDs of special symbols.
__A : Dict[str, int] = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
__A : Union[str, Any] = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
__A : Dict[int, str] = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
__A : Tuple = UNICODE_VOCAB_SIZE
__A : int = len(self._special_codepoints )
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return self._unicode_vocab_size
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
return list(__lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
try:
return ord(__lowerCamelCase )
except TypeError:
raise ValueError(F"""invalid token: '{token}'""" )
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(__lowerCamelCase )
except TypeError:
raise ValueError(F"""invalid id: {index}""" )
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
return "".join(__lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = None ):
'''simple docstring'''
__A : Union[str, Any] = [self.sep_token_id]
__A : List[Any] = [self.cls_token_id]
__A : Dict = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
__A : Optional[Any] = [1] + ([0] * len(__lowerCamelCase )) + [1]
if token_ids_a is not None:
result += ([0] * len(__lowerCamelCase )) + [1]
return result
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = None ):
'''simple docstring'''
__A : str = [self.sep_token_id]
__A : str = [self.cls_token_id]
__A : str = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = None ):
'''simple docstring'''
return ()
| 179 |
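The tokenizer above needs no learned vocabulary: every character is encoded as its Unicode codepoint via `ord`, the handful of special tokens live at fixed Private Use Area codepoints, and decoding is `chr`. The core round trip fits in a few lines (constants taken from the definitions above; the framing mirrors `build_inputs_with_special_tokens`):

```python
PAD, CLS, SEP = 0, 0xE000, 0xE001  # special codepoints defined above

def encode(text: str) -> list[int]:
    """Character-level encoding with [CLS] ... [SEP] framing."""
    return [CLS] + [ord(ch) for ch in text] + [SEP]

def decode(ids: list[int]) -> str:
    specials = {PAD, CLS, SEP}
    return "".join(chr(i) for i in ids if i not in specials)

ids = encode("héllo")
assert decode(ids) == "héllo"
```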
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
a_ = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __lowerCamelCase , __lowerCamelCase=7 , __lowerCamelCase=3 , __lowerCamelCase=18 , __lowerCamelCase=30 , __lowerCamelCase=400 , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=None , ):
'''simple docstring'''
__A : str = size if size is not None else {'''height''': 20, '''width''': 20}
__A : Dict = parent
__A : Union[str, Any] = batch_size
__A : List[Any] = num_channels
__A : Union[str, Any] = image_size
__A : Any = min_resolution
__A : str = max_resolution
__A : Any = size
__A : Dict = do_normalize
__A : str = do_convert_rgb
__A : List[Any] = [512, 1024, 2048, 4096]
__A : Dict = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def UpperCamelCase__( self ):
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[str] = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
__A : Optional[Any] = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ).convert('''RGB''' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class __snake_case ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = PixaStructImageProcessor if is_vision_available() else None
def UpperCamelCase__( self ):
'''simple docstring'''
__A : str = PixaStructImageProcessingTester(self )
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_convert_rgb''' ) )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Tuple = self.image_processor_tester.prepare_dummy_image()
__A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
__A : Dict = 2048
__A : Any = image_processor(__lowerCamelCase , return_tensors='''pt''' , max_patches=__lowerCamelCase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_6_0_6 ) , atol=1e-3 , rtol=1e-3 ) )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
__A : List[str] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__A : Dict = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__A : Tuple = image_processor(
__lowerCamelCase , return_tensors='''pt''' , max_patches=__lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
__A : Tuple = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
__A : str = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(__lowerCamelCase ):
__A : List[Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__lowerCamelCase ).flattened_patches
__A : Any = '''Hello'''
__A : Tuple = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__lowerCamelCase , header_text=__lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__A : Union[str, Any] = image_processor(
__lowerCamelCase , return_tensors='''pt''' , max_patches=__lowerCamelCase , header_text=__lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
__A : List[Any] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__A : List[str] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__A : Union[str, Any] = image_processor(
__lowerCamelCase , return_tensors='''pt''' , max_patches=__lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
__A : int = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__A : List[Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__A : Dict = image_processor(
__lowerCamelCase , return_tensors='''pt''' , max_patches=__lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class __snake_case ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = PixaStructImageProcessor if is_vision_available() else None
def UpperCamelCase__( self ):
'''simple docstring'''
__A : int = PixaStructImageProcessingTester(self , num_channels=4 )
__A : Any = 3
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_convert_rgb''' ) )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
__A : List[Any] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__A : Any = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=__lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__A : int = image_processor(
__lowerCamelCase , return_tensors='''pt''' , max_patches=__lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 179 | 1 |
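A detail the assertions above rely on: Pix2Struct flattens each patch and prepends its row and column index, so the per-patch feature size is patch_height × patch_width × channels + 2. The RGBA variant subtracts one channel because `do_convert_rgb` drops the alpha channel. In numbers, for the defaults used here:

```python
patch = {"height": 16, "width": 16}
num_channels = 3
expected_hidden_dim = patch["height"] * patch["width"] * num_channels + 2
print(expected_hidden_dim)  # 770: 768 flattened pixel values + (row, col) ids
```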
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
lowerCamelCase__ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
lowerCamelCase__ = f"""https://www.google.com/search?q={query}&num=100"""
lowerCamelCase__ = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
lowerCamelCase__ = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
lowerCamelCase__ = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
| 310 |
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
lowerCamelCase__ = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
lowerCamelCase__ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
if "://" in dataset_path:
_UpperCamelCase : List[Any] = dataset_path.split("://" )[1]
return dataset_path
def lowercase__ ( lowercase_ ) -> bool:
"""simple docstring"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = not is_remote_filesystem(lowercase_ )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowercase_ ) ,fs._strip_protocol(lowercase_ ) )
else:
fs.mv(lowercase_ ,lowercase_ ,recursive=lowercase_ )
def lowercase__ ( ) -> None:
"""simple docstring"""
if hasattr(fsspec.asyn ,"reset_lock" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
_UpperCamelCase : Dict = None
_UpperCamelCase : str = None
_UpperCamelCase : str = threading.Lock()
| 310 | 1 |
def a ( _UpperCAmelCase : int ):
'''simple docstring'''
__UpperCAmelCase : Any = abs(_UpperCAmelCase )
__UpperCAmelCase : Tuple = 0
while n > 0:
res += n % 10
n //= 10
return res
def a ( _UpperCAmelCase : int ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = abs(_UpperCAmelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def a ( _UpperCAmelCase : int ):
'''simple docstring'''
return sum(int(_UpperCAmelCase ) for c in str(abs(_UpperCAmelCase ) ) )
def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 226 |
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
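# Note (illustrative): the prefactor 1 / (2 * pi * sigma) follows the original
# script rather than the textbook 1 / (2 * pi * sigma**2); either way the kernel
# is not re-normalised here, so e.g. gen_gaussian_kernel(3, 1).sum() != 1.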
def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
| 226 | 1 |
import math
import qiskit
def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)
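# Reading the result (illustrative): the two measured classical bits encode the
# sum bit (qubit 2) and the carry-out bit (qubit 3), so quantum_full_adder(1, 1, 1)
# concentrates its counts on '11', i.e. 1 + 1 + 1 = 0b11.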
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 356 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()

        output = pipeline(
            prompt, image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np", )

        image = output.images[0]
        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 258 | 0 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
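# Illustrative lookup (not part of the original file): passing `--lr_scheduler linear`
# resolves to get_linear_schedule_with_warmup, which _get_lr_scheduler below calls as
# schedule_func(optimizer, num_warmup_steps=..., num_training_steps=...).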
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs, )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"], attention_mask=inputs["attention_mask"], **gen_kwargs, )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 207 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False
    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11_859, 0, 1_611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 207 | 1 |
"""simple docstring"""
import os
def __lowercase ( _a = "input.txt" ):
with open(os.path.join(os.path.dirname(_a ) , _a ) ) as input_file:
snake_case_ : Tuple = [
[int(_a ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
snake_case_ : Tuple = len(_a )
snake_case_ : str = len(matrix[0] )
snake_case_ : Any = [[-1 for _ in range(_a )] for _ in range(_a )]
for i in range(_a ):
snake_case_ : int = matrix[i][0]
for j in range(1 , _a ):
for i in range(_a ):
snake_case_ : Optional[int] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , _a ):
snake_case_ : List[Any] = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
snake_case_ : Tuple = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
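# How the three inner passes work (illustrative): for each new column j, the
# first pass assumes a straight move from the left, the second sweeps downwards
# to allow paths arriving from above, and the third sweeps upwards to allow
# paths arriving from below -- together covering every right/up/down path, as
# Project Euler problem 82 requires.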
if __name__ == "__main__":
print(f'{solution() = }')
| 354 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")


class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 155 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)
    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 20 |
'''simple docstring'''
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)
    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"
    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )
    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
| 251 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 365 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep
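    # Layout produced by build_inputs_with_special_tokens (illustrative):
    #   single sequence:   <s> A </s>
    #   pair of sequences: <s> A </s> B </s>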
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 206 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    # First method: ordinary least squares via the normal equation
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    # Second method: SARIMAX with a weekly (period-7) seasonal component
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    # Third method: support vector regression with an RBF kernel
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    # Lower limit derived from the interquartile range of the training data
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    # Each forecast votes "safe" if it is within 0.1 of the actual value
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
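# Illustrative check of the voting rule above: data_safety_checker([100, 100, 100], 100.0)
# returns True, since all three forecasts sit within the 0.1 tolerance of the actual value.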
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
UpperCAmelCase_ : List[str] = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]]
UpperCAmelCase_ : Dict = pd.DataFrame(
data_input, columns=['total_user', 'total_even', 'days']
)
UpperCAmelCase_ : int = Normalizer().fit_transform(data_input_df.values)
# split data
UpperCAmelCase_ : List[str] = normalize_df[:, 2].tolist()
UpperCAmelCase_ : Dict = normalize_df[:, 0].tolist()
UpperCAmelCase_ : List[Any] = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
UpperCAmelCase_ : int = normalize_df[:, [1, 2]].tolist()
UpperCAmelCase_ : List[str] = x[: len(x) - 1]
UpperCAmelCase_ : Any = x[len(x) - 1 :]
# for linear regression & sarimax
UpperCAmelCase_ : Optional[int] = total_date[: len(total_date) - 1]
UpperCAmelCase_ : str = total_user[: len(total_user) - 1]
UpperCAmelCase_ : List[Any] = total_match[: len(total_match) - 1]
UpperCAmelCase_ : Optional[int] = total_date[len(total_date) - 1 :]
UpperCAmelCase_ : Any = total_user[len(total_user) - 1 :]
UpperCAmelCase_ : str = total_match[len(total_match) - 1 :]
# voting system with forecasting
UpperCAmelCase_ : Optional[Any] = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
UpperCAmelCase_ : Optional[Any] = '' if data_safety_checker(res_vote, tst_user) else 'not '
print('Today\'s data is {not_str}safe.')
| 32 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
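# Illustrative usage of the helper above: a 10-step cosine schedule.
#   betas = betas_for_alpha_bar(10)                       # tensor of 10 betas, each capped at max_beta
#   alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)    # recovers the discretised alpha-bar curve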
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : Union[str, torch.device] = None ,A_ : Optional[int] = None ,) -> Optional[Any]:
A = num_inference_steps
A = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
A = np.linspace(0 ,num_train_timesteps - 1 ,A_ ,dtype=A_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
A = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A = (np.arange(0 ,A_ ) * step_ratio).round()[::-1].copy().astype(A_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
A = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A = (np.arange(A_ ,0 ,-step_ratio )).round().copy().astype(A_ )
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
A = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
A = np.log(A_ )
A = np.interp(A_ ,np.arange(0 ,len(A_ ) ) ,A_ )
if self.config.use_karras_sigmas:
A = self._convert_to_karras(in_sigmas=A_ ,num_inference_steps=self.num_inference_steps )
A = np.array([self._sigma_to_t(A_ ,A_ ) for sigma in sigmas] )
A = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
A = torch.from_numpy(A_ ).to(device=A_ )
A = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
A = torch.from_numpy(A_ )
A = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(A_ ).startswith('mps' ):
# mps does not support float64
A = timesteps.to(A_ ,dtype=torch.floataa )
else:
A = timesteps.to(device=A_ )
# empty dt and derivative
A = None
A = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
A = defaultdict(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : List[str] ) -> Dict:
# get log sigma
A = np.log(A_ )
# get distribution
A = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
A = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
A = low_idx + 1
A = log_sigmas[low_idx]
A = log_sigmas[high_idx]
# interpolate sigmas
A = (low - log_sigma) / (low - high)
A = np.clip(A_ ,0 ,1 )
# transform interpolation to time range
A = (1 - w) * low_idx + w * high_idx
A = t.reshape(sigma.shape )
return t
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : torch.FloatTensor ,A_ : int ) -> torch.FloatTensor:
A = in_sigmas[-1].item()
A = in_sigmas[0].item()
A = 7.0 # 7.0 is the value used in the paper
A = np.linspace(0 ,1 ,A_ )
A = sigma_min ** (1 / rho)
A = sigma_max ** (1 / rho)
A = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
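# Illustrative note (not from the original file): with the paper's rho = 7,
# the ramp above implements sigma_i = (sigma_max**(1/rho)
# + ramp_i * (sigma_min**(1/rho) - sigma_max**(1/rho)))**rho. Since
# in_sigmas is stored in descending order, ramp = 0 reproduces sigma_max and
# ramp = 1 reproduces sigma_min, with intermediate sigmas spaced per
# Karras et al.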
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
return self.dt is None
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : Union[float, torch.FloatTensor] ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : bool = True ,) -> Union[SchedulerOutput, Tuple]:
A = self.index_for_timestep(A_ )
# advance index counter by 1
A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
A = self.sigmas[step_index]
A = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
A = self.sigmas[step_index - 1]
A = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
A = 0
A = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
A = sigma_hat if self.state_in_first_order else sigma_next
A = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
A = sigma_hat if self.state_in_first_order else sigma_next
A = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
A = model_output
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.config.clip_sample:
A = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
A = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
A = sigma_next - sigma_hat
# store for 2nd order step
A = derivative
A = dt
A = sample
else:
# 2. 2nd order / Heun's method
A = (sample - pred_original_sample) / sigma_next
A = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
A = self.dt
A = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
A = None
A = None
A = None
A = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A_ )
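# Illustrative sketch (not from the original file): the two branches above are
# the classic Heun predictor-corrector. In generic form, with names chosen
# only for this comment:
#     d1 = f(x, sigma)                       # first-order (Euler) derivative
#     x_pred = x + d1 * (sigma_next - sigma)
#     d2 = f(x_pred, sigma_next)             # derivative at the predicted point
#     x_next = x + 0.5 * (d1 + d2) * (sigma_next - sigma)
# The scheduler spreads this across two `step` calls, caching d1, dt and the
# original sample in `prev_derivative`, `dt` and `sample`.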
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
A = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(A_ ):
# mps does not support float64
A = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
A = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
A = self.timesteps.to(original_samples.device )
A = timesteps.to(original_samples.device )
A = [self.index_for_timestep(A_ ,A_ ) for t in timesteps]
A = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
A = sigma.unsqueeze(-1 )
A = original_samples + noise * sigma
return noisy_samples
def __len__( self : Dict ) -> int:
return self.config.num_train_timesteps
| 74 | 0 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
lowerCAmelCase__ : List[Any] =(720, 1280) # Height, Width
lowerCAmelCase__ : Union[str, Any] =(0.4, 0.6) # if height or width is lower than this scale, drop it.
lowerCAmelCase__ : Union[str, Any] =1 / 100
lowerCAmelCase__ : int =''''''
lowerCAmelCase__ : List[Any] =''''''
lowerCAmelCase__ : List[str] =''''''
lowerCAmelCase__ : str =250
def __lowercase ( ) -> None:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_dataset(a__ , a__ )
for index in range(a__ ):
__SCREAMING_SNAKE_CASE = random.sample(range(len(a__ ) ) , 4 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = update_image_and_anno(
a__ , a__ , a__ , a__ , a__ , filter_scale=a__ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__SCREAMING_SNAKE_CASE = random_chars(32 )
__SCREAMING_SNAKE_CASE = path.split(os.sep )[-1].rsplit('.' , 1 )[0]
__SCREAMING_SNAKE_CASE = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(f"""{file_root}.jpg""" , a__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
__SCREAMING_SNAKE_CASE = []
for anno in new_annos:
__SCREAMING_SNAKE_CASE = anno[3] - anno[1]
__SCREAMING_SNAKE_CASE = anno[4] - anno[2]
__SCREAMING_SNAKE_CASE = anno[1] + width / 2
__SCREAMING_SNAKE_CASE = anno[2] + height / 2
__SCREAMING_SNAKE_CASE = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(a__ )
with open(f"""{file_root}.txt""" , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def __lowercase ( a__ , a__ ) -> tuple[list, list]:
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
for label_file in glob.glob(os.path.join(a__ , '*.txt' ) ):
__SCREAMING_SNAKE_CASE = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(a__ ) as in_file:
__SCREAMING_SNAKE_CASE = in_file.readlines()
__SCREAMING_SNAKE_CASE = os.path.join(a__ , f"""{label_name}.jpg""" )
__SCREAMING_SNAKE_CASE = []
for obj_list in obj_lists:
__SCREAMING_SNAKE_CASE = obj_list.rstrip('\n' ).split(' ' )
__SCREAMING_SNAKE_CASE = float(obj[1] ) - float(obj[3] ) / 2
__SCREAMING_SNAKE_CASE = float(obj[2] ) - float(obj[4] ) / 2
__SCREAMING_SNAKE_CASE = float(obj[1] ) + float(obj[3] ) / 2
__SCREAMING_SNAKE_CASE = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(a__ )
labels.append(a__ )
return img_paths, labels
def __lowercase ( a__ , a__ , a__ , a__ , a__ , a__ = 0.0 , ) -> tuple[list, list, str]:
__SCREAMING_SNAKE_CASE = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
__SCREAMING_SNAKE_CASE = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__SCREAMING_SNAKE_CASE = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__SCREAMING_SNAKE_CASE = int(scale_x * output_size[1] )
__SCREAMING_SNAKE_CASE = int(scale_y * output_size[0] )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
for i, index in enumerate(a__ ):
__SCREAMING_SNAKE_CASE = all_img_list[index]
path_list.append(a__ )
__SCREAMING_SNAKE_CASE = all_annos[index]
__SCREAMING_SNAKE_CASE = cva.imread(a__ )
if i == 0: # top-left
__SCREAMING_SNAKE_CASE = cva.resize(a__ , (divid_point_x, divid_point_y) )
__SCREAMING_SNAKE_CASE = img
for bbox in img_annos:
__SCREAMING_SNAKE_CASE = bbox[1] * scale_x
__SCREAMING_SNAKE_CASE = bbox[2] * scale_y
__SCREAMING_SNAKE_CASE = bbox[3] * scale_x
__SCREAMING_SNAKE_CASE = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
__SCREAMING_SNAKE_CASE = cva.resize(a__ , (output_size[1] - divid_point_x, divid_point_y) )
__SCREAMING_SNAKE_CASE = img
for bbox in img_annos:
__SCREAMING_SNAKE_CASE = scale_x + bbox[1] * (1 - scale_x)
__SCREAMING_SNAKE_CASE = bbox[2] * scale_y
__SCREAMING_SNAKE_CASE = scale_x + bbox[3] * (1 - scale_x)
__SCREAMING_SNAKE_CASE = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
__SCREAMING_SNAKE_CASE = cva.resize(a__ , (divid_point_x, output_size[0] - divid_point_y) )
__SCREAMING_SNAKE_CASE = img
for bbox in img_annos:
__SCREAMING_SNAKE_CASE = bbox[1] * scale_x
__SCREAMING_SNAKE_CASE = scale_y + bbox[2] * (1 - scale_y)
__SCREAMING_SNAKE_CASE = bbox[3] * scale_x
__SCREAMING_SNAKE_CASE = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
__SCREAMING_SNAKE_CASE = cva.resize(
a__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
__SCREAMING_SNAKE_CASE = img
for bbox in img_annos:
__SCREAMING_SNAKE_CASE = scale_x + bbox[1] * (1 - scale_x)
__SCREAMING_SNAKE_CASE = scale_y + bbox[2] * (1 - scale_y)
__SCREAMING_SNAKE_CASE = scale_x + bbox[3] * (1 - scale_x)
__SCREAMING_SNAKE_CASE = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
__SCREAMING_SNAKE_CASE = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
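# Hedged sketch (illustrative, not part of the original file): each quadrant
# branch above rescales normalized [0, 1] bbox coordinates into the mosaic
# canvas. The helper below reproduces the top-left case for one box;
# `_demo_top_left_remap` is a name assumed only for this example.
def _demo_top_left_remap(bbox, scale_x, scale_y):
    # bbox = [label, xmin, ymin, xmax, ymax], coordinates normalized to [0, 1]
    label, xmin, ymin, xmax, ymax = bbox
    return [label, xmin * scale_x, ymin * scale_y, xmax * scale_x, ymax * scale_y]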
def __lowercase ( a__ ) -> str:
assert number_char > 1, "The number of characters should be greater than 1"
__SCREAMING_SNAKE_CASE = ascii_lowercase + digits
return "".join(random.choice(a__ ) for _ in range(a__ ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 118 |
from collections import deque
from .hash_table import HashTable
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
def __init__( self , *_A , **_A ):
'''simple docstring'''
super().__init__(*_A , **_A )
def _A ( self , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(_A )
__SCREAMING_SNAKE_CASE = self.values[key]
def _A ( self ):
'''simple docstring'''
return (
sum(self.charge_factor - len(_A ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def _A ( self , _A , _A=None ):
'''simple docstring'''
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(_A ) == 0
):
return key
return super()._collision_resolution(_A , _A )
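# Hedged usage sketch (illustrative, not from the original file). It uses the
# class's upstream name `HashTableWithLinkedList` for readability and assumes
# the parent HashTable from .hash_table accepts a table size and charge
# factor, which is not shown here:
#     table = HashTableWithLinkedList(size_table=4, charge_factor=2)
#     # each slot holds a deque; repeated inserts to one key prepend to it,
#     # and balanced_factor() averages the remaining per-slot capacity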
| 118 | 1 |
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = ['input_values', 'attention_mask']
def __init__( self , lowercase = 1 , lowercase = 16_000 , lowercase = 0.0 , lowercase = False , lowercase = 80 , lowercase = 16 , lowercase = 64 , lowercase = "hann_window" , lowercase = 1.0 , lowercase = 80 , lowercase = 7_600 , lowercase = 1e-10 , lowercase = 2 , lowercase = True , **lowercase , ) -> List[str]:
super().__init__(feature_size=lowercase , sampling_rate=lowercase , padding_value=lowercase , **lowercase )
lowerCAmelCase = do_normalize
lowerCAmelCase = return_attention_mask
lowerCAmelCase = num_mel_bins
lowerCAmelCase = hop_length
lowerCAmelCase = win_length
lowerCAmelCase = win_function
lowerCAmelCase = frame_signal_scale
lowerCAmelCase = fmin
lowerCAmelCase = fmax
lowerCAmelCase = mel_floor
lowerCAmelCase = reduction_factor
lowerCAmelCase = win_length * sampling_rate // 1_000
lowerCAmelCase = hop_length * sampling_rate // 1_000
lowerCAmelCase = optimal_fft_length(self.sample_size )
lowerCAmelCase = (self.n_fft // 2) + 1
lowerCAmelCase = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowercase )
lowerCAmelCase = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , )
if frame_signal_scale != 1.0:
warnings.warn(
"""The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers""" , lowercase , )
if reduction_factor != 2.0:
warnings.warn(
"""The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers""" , lowercase , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _snake_case ( lowercase , lowercase , lowercase = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
lowerCAmelCase = np.array(lowercase , np.intaa )
lowerCAmelCase = []
for vector, length in zip(lowercase , attention_mask.sum(-1 ) ):
lowerCAmelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
lowerCAmelCase = padding_value
normed_input_values.append(lowercase )
else:
lowerCAmelCase = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
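# Illustrative note (not from the original file): each sequence is normalized
# only over its unpadded length. A minimal trace with assumed values:
#     vector = [1.0, 3.0, 0.0, 0.0], length = 2
#     mean over vector[:2] = 2.0, var = 1.0
#     -> normalized prefix ~ [-1.0, 1.0], padded tail set to `padding_value`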
def _snake_case ( self , lowercase , ) -> np.ndarray:
lowerCAmelCase = spectrogram(
lowercase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="""log10""" , )
return log_mel_spec.T
def __call__( self , lowercase = None , lowercase = None , lowercase = False , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = None , lowercase = None , **lowercase , ) -> BatchFeature:
if audio is None and audio_target is None:
raise ValueError("""You must provide either `audio` or `audio_target` values.""" )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if audio is not None:
lowerCAmelCase = self._process_audio(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase , )
else:
lowerCAmelCase = None
if audio_target is not None:
lowerCAmelCase = self._process_audio(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase , )
if inputs is None:
return inputs_target
else:
lowerCAmelCase = inputs_target["""input_values"""]
lowerCAmelCase = inputs_target.get("""attention_mask""" )
if decoder_attention_mask is not None:
lowerCAmelCase = decoder_attention_mask
return inputs
def _snake_case ( self , lowercase , lowercase = False , lowercase = False , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = None , **lowercase , ) -> BatchFeature:
lowerCAmelCase = isinstance(lowercase , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
lowerCAmelCase = is_batched_numpy or (
isinstance(lowercase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase = [np.asarray(lowercase , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(lowercase , np.ndarray ):
lowerCAmelCase = np.asarray(lowercase , dtype=np.floataa )
elif isinstance(lowercase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase = speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase = [speech]
# needed to make pad() work on spectrogram inputs
lowerCAmelCase = self.feature_size
# convert into correct format for padding
if is_target:
lowerCAmelCase = [self._extract_mel_features(lowercase ) for waveform in speech]
lowerCAmelCase = BatchFeature({"""input_values""": features} )
lowerCAmelCase = self.num_mel_bins
else:
lowerCAmelCase = BatchFeature({"""input_values""": speech} )
lowerCAmelCase = self.pad(
lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , **lowercase , )
lowerCAmelCase = feature_size_hack
# convert input values to correct format
lowerCAmelCase = padded_inputs["""input_values"""]
if not isinstance(input_values[0] , np.ndarray ):
lowerCAmelCase = [np.asarray(lowercase , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(lowercase , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
lowerCAmelCase = [array.astype(np.floataa ) for array in input_values]
elif isinstance(lowercase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
lowerCAmelCase = input_values.astype(np.floataa )
# convert attention_mask to correct format
lowerCAmelCase = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
lowerCAmelCase = [np.asarray(lowercase , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
lowerCAmelCase = (
attention_mask
if self._get_padding_strategies(lowercase , max_length=lowercase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowerCAmelCase = self.zero_mean_unit_var_norm(
padded_inputs["""input_values"""] , attention_mask=lowercase , padding_value=self.padding_value )
if return_tensors is not None:
lowerCAmelCase = padded_inputs.convert_to_tensors(lowercase )
return padded_inputs
def _snake_case ( self ) -> Dict[str, Any]:
lowerCAmelCase = super().to_dict()
# Don't serialize these as they are derived from the other properties.
lowerCAmelCase = ["""window""", """mel_filters""", """sample_size""", """sample_stride""", """n_fft""", """n_freqs"""]
for name in names:
if name in output:
del output[name]
return output
| 46 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase ( _UpperCAmelCase ):
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=False , lowercase=False , lowercase=False , lowercase=2 , lowercase=99 , lowercase=0 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=12 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase="last" , lowercase=None , lowercase=None , ) -> int:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_lengths
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = gelu_activation
lowerCAmelCase = sinusoidal_embeddings
lowerCAmelCase = causal
lowerCAmelCase = asm
lowerCAmelCase = n_langs
lowerCAmelCase = vocab_size
lowerCAmelCase = n_special
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = summary_type
lowerCAmelCase = use_proj
lowerCAmelCase = scope
def _snake_case ( self ) -> int:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_input_lengths:
lowerCAmelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , 2 ).float()
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _snake_case ( self ) -> List[Any]:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Any:
lowerCAmelCase = FlaubertModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , lengths=lowercase , langs=lowercase )
lowerCAmelCase = model(lowercase , langs=lowercase )
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple:
lowerCAmelCase = FlaubertWithLMHeadModel(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> str:
lowerCAmelCase = FlaubertForQuestionAnsweringSimple(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase )
lowerCAmelCase = model(lowercase , start_positions=lowercase , end_positions=lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Dict:
lowerCAmelCase = FlaubertForQuestionAnswering(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase )
lowerCAmelCase = model(
lowercase , start_positions=lowercase , end_positions=lowercase , cls_index=lowercase , is_impossible=lowercase , p_mask=lowercase , )
lowerCAmelCase = model(
lowercase , start_positions=lowercase , end_positions=lowercase , cls_index=lowercase , is_impossible=lowercase , )
((lowerCAmelCase) , ) = result_with_labels.to_tuple()
lowerCAmelCase = model(lowercase , start_positions=lowercase , end_positions=lowercase )
((lowerCAmelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> int:
lowerCAmelCase = FlaubertForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase )
lowerCAmelCase = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> int:
lowerCAmelCase = self.num_labels
lowerCAmelCase = FlaubertForTokenClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple:
lowerCAmelCase = self.num_choices
lowerCAmelCase = FlaubertForMultipleChoice(config=lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) ,
) = config_and_inputs
lowerCAmelCase = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _snake_case ( self , lowercase , lowercase , lowercase=False ) -> Optional[Any]:
lowerCAmelCase = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = FlaubertModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=lowercase , emb_dim=37 )
def _snake_case ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowercase )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowercase )
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowercase )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowercase )
@slow
def _snake_case ( self ) -> Tuple:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = FlaubertModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@slow
@require_torch_gpu
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowerCAmelCase = True
lowerCAmelCase = model_class(config=lowercase )
lowerCAmelCase = self._prepare_for_class(lowercase , lowercase )
lowerCAmelCase = torch.jit.trace(
lowercase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase , os.path.join(lowercase , """traced_model.pt""" ) )
lowerCAmelCase = torch.jit.load(os.path.join(lowercase , """traced_model.pt""" ) , map_location=lowercase )
loaded(inputs_dict["""input_ids"""].to(lowercase ) , inputs_dict["""attention_mask"""].to(lowercase ) )
@require_torch
class lowercase ( unittest.TestCase ):
@slow
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
lowerCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
with torch.no_grad():
lowerCAmelCase = model(lowercase )[0]
lowerCAmelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowercase )
lowerCAmelCase = torch.tensor(
[[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1e-4 ) )
| 46 | 1 |
'''simple docstring'''
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
SCREAMING_SNAKE_CASE : List[str] = n - k
# Calculate C(n,k)
for i in range(__UpperCamelCase ):
result *= n - i
result //= i + 1
return result
def lowercase__( __UpperCamelCase: int ):
"""simple docstring"""
return binomial_coefficient(2 * node_count ,__UpperCamelCase ) // (node_count + 1)
def lowercase__( __UpperCamelCase: int ):
"""simple docstring"""
if n < 0:
raise ValueError('factorial() not defined for negative values' )
SCREAMING_SNAKE_CASE : Optional[Any] = 1
for i in range(1 ,n + 1 ):
result *= i
return result
def lowercase__( __UpperCamelCase: int ):
"""simple docstring"""
return catalan_number(__UpperCamelCase ) * factorial(__UpperCamelCase )
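# Worked example (illustrative, not from the original file), using the names
# referenced by the calls above:
#     catalan_number(3)    -> binomial_coefficient(6, 3) // 4 = 20 // 4 = 5
#     factorial(3)         -> 6
#     binary_tree_count(3) -> 5 * 6 = 30 labeled binary trees on 3 nodes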
if __name__ == "__main__":
UpperCamelCase_ = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
F"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
F"""binary trees and {catalan_number(node_count)} binary search trees."""
)
| 246 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class _a :
'''simple docstring'''
A : Tuple = BlenderbotSmallConfig
A : Optional[int] = {}
A : Any = '''gelu'''
def __init__( self, A, A=13, A=7, A=True, A=False, A=99, A=32, A=2, A=4, A=37, A=0.1, A=0.1, A=20, A=2, A=1, A=0, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = seq_length
SCREAMING_SNAKE_CASE : Dict = is_training
SCREAMING_SNAKE_CASE : Optional[int] = use_labels
SCREAMING_SNAKE_CASE : List[str] = vocab_size
SCREAMING_SNAKE_CASE : Dict = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Any = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = eos_token_id
SCREAMING_SNAKE_CASE : List[str] = pad_token_id
SCREAMING_SNAKE_CASE : Union[str, Any] = bos_token_id
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
SCREAMING_SNAKE_CASE : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
SCREAMING_SNAKE_CASE : str = tf.concat([input_ids, eos_tensor], axis=1 )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
SCREAMING_SNAKE_CASE : List[str] = prepare_blenderbot_small_inputs_dict(A, A, A )
return config, inputs_dict
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = TFBlenderbotSmallModel(config=A ).get_decoder()
SCREAMING_SNAKE_CASE : Optional[int] = inputs_dict['input_ids']
SCREAMING_SNAKE_CASE : List[Any] = input_ids[:1, :]
SCREAMING_SNAKE_CASE : List[Any] = inputs_dict['attention_mask'][:1, :]
SCREAMING_SNAKE_CASE : Dict = inputs_dict['head_mask']
SCREAMING_SNAKE_CASE : int = 1
# first forward pass
SCREAMING_SNAKE_CASE : Union[str, Any] = model(A, attention_mask=A, head_mask=A, use_cache=A )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor((self.batch_size, 3), config.vocab_size )
SCREAMING_SNAKE_CASE : Tuple = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
# append to next input_ids and
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.concat([input_ids, next_tokens], axis=-1 )
SCREAMING_SNAKE_CASE : str = tf.concat([attention_mask, next_attn_mask], axis=-1 )
SCREAMING_SNAKE_CASE : Any = model(A, attention_mask=A )[0]
SCREAMING_SNAKE_CASE : List[str] = model(A, attention_mask=A, past_key_values=A )[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE : str = int(ids_tensor((1,), output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE : List[str] = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE : str = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A, A, rtol=1E-3 )
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: Optional[int] ,__UpperCamelCase: int ,__UpperCamelCase: Optional[Any]=None ,__UpperCamelCase: List[str]=None ,__UpperCamelCase: int=None ,__UpperCamelCase: Any=None ,__UpperCamelCase: Union[str, Any]=None ,):
"""simple docstring"""
if attention_mask is None:
SCREAMING_SNAKE_CASE : Optional[Any] = tf.cast(tf.math.not_equal(__UpperCamelCase ,config.pad_token_id ) ,tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE : Any = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ),
] ,axis=-1 ,)
if head_mask is None:
SCREAMING_SNAKE_CASE : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
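# Illustrative note (not from the original file): when no decoder mask is
# given, the helper above forces attention on the first decoder position
# (tf.ones over [:, :1]) and masks padding everywhere else, so generation can
# always attend to the decoder start token.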
@require_tf
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : List[str] = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
A : List[str] = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
A : List[str] = (
{
'''conversational''': TFBlenderbotSmallForConditionalGeneration,
'''feature-extraction''': TFBlenderbotSmallModel,
'''summarization''': TFBlenderbotSmallForConditionalGeneration,
'''text2text-generation''': TFBlenderbotSmallForConditionalGeneration,
'''translation''': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
A : int = True
A : Optional[int] = False
A : str = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = TFBlenderbotSmallModelTester(self )
SCREAMING_SNAKE_CASE : str = ConfigTester(self, config_class=A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A )
@require_tokenizers
@require_tf
class _a ( unittest.TestCase ):
'''simple docstring'''
A : Union[str, Any] = [
'''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '''
''' i\'m going to throw up.\nand why is that?'''
]
A : List[Any] = '''facebook/blenderbot_small-90M'''
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.tokenizer(self.src_text, return_tensors='tf' )
SCREAMING_SNAKE_CASE : int = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=A, )
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=A )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 246 | 1 |
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
__UpperCamelCase = namedtuple('''covid_data''', '''cases deaths recovered''')
def lowercase (SCREAMING_SNAKE_CASE_ : List[Any] = "https://www.worldometers.info/coronavirus/" ) -> str:
SCREAMING_SNAKE_CASE = '//div[@class = "maincounter-number"]/span/text()'
return covid_data(*html.fromstring(requests.get(snake_case_ ).content ).xpath(snake_case_ ) )
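# Illustrative note (not from the original file): the xpath above yields three
# strings (cases, deaths, recovered) in page order, and the star-unpacking
# maps them positionally onto the namedtuple fields declared above.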
__UpperCamelCase = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 113 |
from typing import Any
class __lowerCAmelCase :
def __init__( self : List[Any] , snake_case__ : Any ):
"""simple docstring"""
_UpperCAmelCase = data
_UpperCAmelCase = None
class __lowerCAmelCase :
def __init__( self : Optional[Any] ):
"""simple docstring"""
_UpperCAmelCase = None
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
_UpperCAmelCase = self.head
while temp is not None:
print(temp.data , end=" " )
_UpperCAmelCase = temp.next
print()
def UpperCamelCase ( self : Any , snake_case__ : Any ):
"""simple docstring"""
_UpperCAmelCase = Node(snake_case__ )
_UpperCAmelCase = self.head
_UpperCAmelCase = new_node
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Optional[Any] ):
"""simple docstring"""
if node_data_a == node_data_a:
return
else:
_UpperCAmelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_UpperCAmelCase = node_a.next
_UpperCAmelCase = self.head
while node_a is not None and node_a.data != node_data_a:
_UpperCAmelCase = node_a.next
if node_a is None or node_a is None:
return
_UpperCAmelCase , _UpperCAmelCase = node_a.data, node_a.data
if __name__ == "__main__":
lowercase_ : Union[str, Any] = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
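# Hedged trace (illustrative, not from the original file): push() prepends, so
# pushing 5, 4, 3, 2, 1 prints "1 2 3 4 5"; swap_nodes(1, 4) exchanges only
# the stored data, yielding "4 2 3 1 5" without relinking any nodes.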
| 133 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _UpperCAmelCase ( lowerCAmelCase__, lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase__ ="swin"
lowerCamelCase__ ={
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__(self , a_=2_24 , a_=4 , a_=3 , a_=96 , a_=[2, 2, 6, 2] , a_=[3, 6, 12, 24] , a_=7 , a_=4.0 , a_=True , a_=0.0 , a_=0.0 , a_=0.1 , a_="gelu" , a_=False , a_=0.02 , a_=1E-5 , a_=32 , a_=None , a_=None , **a_ , ):
'''simple docstring'''
super().__init__(**a__ )
__snake_case : Dict = image_size
__snake_case : int = patch_size
__snake_case : List[Any] = num_channels
__snake_case : str = embed_dim
__snake_case : Optional[int] = depths
__snake_case : Optional[Any] = len(a__ )
__snake_case : Optional[int] = num_heads
__snake_case : Optional[int] = window_size
__snake_case : Tuple = mlp_ratio
__snake_case : int = qkv_bias
__snake_case : str = hidden_dropout_prob
__snake_case : Tuple = attention_probs_dropout_prob
__snake_case : List[str] = drop_path_rate
__snake_case : int = hidden_act
__snake_case : int = use_absolute_embeddings
__snake_case : List[str] = layer_norm_eps
__snake_case : Tuple = initializer_range
__snake_case : List[Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case : Optional[Any] = int(embed_dim * 2 ** (len(a__ ) - 1) )
__snake_case : Union[str, Any] = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(a__ ) + 1 )]
__snake_case , __snake_case : Tuple = get_aligned_output_features_output_indices(
out_features=a__ , out_indices=a__ , stage_names=self.stage_names )
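# Illustrative note (not from the original file): with the defaults above
# (embed_dim=96, depths=[2, 2, 6, 2]) the derived hidden_size is
# 96 * 2 ** 3 = 768, matching the channel width after Swin's last stage.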
class _UpperCAmelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase__ =version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 1E-4
| 363 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =['image_processor', 'tokenizer']
lowerCamelCase__ ='CLIPImageProcessor'
lowerCamelCase__ =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__(self , a_=None , a_=None , **a_ ):
'''simple docstring'''
__snake_case : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a_ , )
__snake_case : Union[str, Any] = kwargs.pop('''feature_extractor''' )
__snake_case : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a_ , a_ )
def __call__(self , a_=None , a_=None , a_=None , **a_ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__snake_case : Dict = self.tokenizer(a_ , return_tensors=a_ , **a_ )
if images is not None:
__snake_case : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ )
if text is not None and images is not None:
__snake_case : List[str] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a_ , **a_ )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.decode(*a_ , **a_ )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.tokenizer.model_input_names
__snake_case : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 24 | 0 |
def __lowercase ( _A ) -> str:
SCREAMING_SNAKE_CASE : int = 0
for ch in input_str:
SCREAMING_SNAKE_CASE : Any = ord(_A )
SCREAMING_SNAKE_CASE : List[str] = pow(2 , _A )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
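# Hedged demo (illustrative, not part of the original file); `_demo_isogram`
# is an assumed name, and `__lowercase` is the obfuscated uniqueness check
# defined above:
def _demo_isogram():
    # "abc" sets a distinct bit per character -> True; the second 'a' in "aba"
    # finds its bit already set -> False
    return __lowercase("abc"), __lowercase("aba")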
if __name__ == "__main__":
import doctest
doctest.testmod()
| 245 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__: Dict = logging.get_logger(__name__)
__magic_name__: List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__: Optional[Any] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
__magic_name__: List[Any] = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
@lru_cache()
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : Any = (
list(range(ord("""!""" ), ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ), ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ), ord("""ÿ""" ) + 1 ) )
)
__magic_name__ : Any = bs[:]
__magic_name__ : Dict = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_A )
cs.append(2**8 + n )
n += 1
__magic_name__ : List[str] = [chr(_A ) for n in cs]
return dict(zip(_A, _A ) )
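# Illustrative note (not from the original file): the mapping above covers all
# 256 byte values; printable bytes map to themselves while control bytes and
# whitespace are shifted to code points 256 and up, so BPE never has to merge
# across "invisible" characters.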
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : str = set()
__magic_name__ : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__magic_name__ : List[Any] = char
return pairs
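# Worked example (illustrative, not from the original file), using the
# original name `get_pairs` that the BPE method below relies on conceptually:
#     get_pairs("hello") -> {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}
# These adjacent-symbol pairs are what the merge loop ranks against bpe_ranks.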
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Union[str, Any] = VOCAB_FILES_NAMES
lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Union[str, Any] = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="replace" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Dict:
__magic_name__ : Tuple = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
__magic_name__ : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
__magic_name__ : str = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
__magic_name__ : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
__magic_name__ : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
__magic_name__ : Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
# Mask token behaves like a normal word, i.e. includes the space before it
__magic_name__ : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as vocab_handle:
__magic_name__ : Union[str, Any] = json.load(lowerCAmelCase__ )
__magic_name__ : Any = {v: k for k, v in self.encoder.items()}
__magic_name__ : Tuple = errors # how to handle errors in decoding
__magic_name__ : Tuple = bytes_to_unicode()
__magic_name__ : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as merges_handle:
__magic_name__ : Optional[Any] = merges_handle.read().split("""\n""" )[1:-1]
__magic_name__ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
__magic_name__ : int = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__magic_name__ : str = {}
__magic_name__ : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__magic_name__ : Union[str, Any] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def __magic_name__ ( self ) -> Optional[Any]:
return len(self.encoder )
def __magic_name__ ( self ) -> Optional[int]:
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__ ( self , lowerCAmelCase__ ) -> str:
if token in self.cache:
return self.cache[token]
__magic_name__ : Union[str, Any] = tuple(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
__magic_name__ : Union[str, Any] = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__magic_name__ ,__magic_name__ : List[str] = bigram
__magic_name__ : Any = []
__magic_name__ : Any = 0
while i < len(lowerCAmelCase__ ):
try:
__magic_name__ : str = word.index(lowerCAmelCase__ , lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__magic_name__ : Optional[Any] = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__magic_name__ : str = tuple(lowerCAmelCase__ )
__magic_name__ : Optional[int] = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
__magic_name__ : List[str] = get_pairs(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = """ """.join(lowerCAmelCase__ )
__magic_name__ : str = word
return word
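    # Toy trace of the merge loop above, assuming bpe_ranks = {("l", "o"): 0,
    # ("lo", "w"): 1}: "low" starts as ('l', 'o', 'w'); the rank-0 pair merges
    # it to ('lo', 'w'); the rank-1 pair merges it to ('low',); with a single
    # symbol left the loop breaks, and the space-joined result is cached.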
def __magic_name__ ( self , lowerCAmelCase__ ) -> Tuple:
__magic_name__ : str = []
for token in re.findall(self.pat , lowerCAmelCase__ ):
__magic_name__ : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(""" """ ) )
return bpe_tokens
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
return self.decoder.get(lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Any:
__magic_name__ : Tuple = """""".join(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__magic_name__ : Tuple = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__ : List[str] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + """\n""" )
__magic_name__ : Optional[Any] = 0
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__ : lowerCAmelCase__[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
__magic_name__ : Optional[int] = token_index
writer.write(""" """.join(lowerCAmelCase__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__magic_name__ : List[str] = [self.cls_token_id]
__magic_name__ : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
__magic_name__ : Dict = [self.sep_token_id]
__magic_name__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False , **lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Any = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
__magic_name__ : List[Any] = """ """ + text
return (text, kwargs)
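# Sketch of end-to-end use, assuming this class is published as ``BartTokenizer``
# and resolves the vocab/merges URLs mapped above:
#
#     tok = BartTokenizer.from_pretrained("facebook/bart-base")
#     enc = tok("Hello world")
#     # enc["input_ids"] starts with <s> (cls) and ends with </s> (sep), as
#     # produced by the special-token builder above.
#     tok.decode(enc["input_ids"], skip_special_tokens=True)   # 'Hello world'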
| 342 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a__ : int = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[int] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
a__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
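# With this _import_structure, ``from transformers.models.mega import MegaConfig``
# succeeds even when torch is not installed; torch-backed names such as
# ``MegaModel`` are only resolved (importing torch in the process) on first
# attribute access, which is the behaviour _LazyModule provides at runtime.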
| 19 |
from math import pi, sqrt, tan
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''' )
SCREAMING_SNAKE_CASE : Optional[Any] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
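    # surface area of a ring torus: 4 * pi^2 * R * r, with R = torus_radius >= r = tube_radius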
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''' )
SCREAMING_SNAKE_CASE : int = (sidea + sidea + sidea) / 2
SCREAMING_SNAKE_CASE : List[str] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
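# Quick check of Heron's formula above: a 3-4-5 right triangle has
# semi_perimeter = 6 and area = sqrt(6 * 3 * 2 * 1) = 6.0, matching base * height / 2.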
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''' )
return 1 / 2 * (basea + basea) * height
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''' )
return 1 / 2 * diagonal_a * diagonal_a
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if not isinstance(a__ , a__ ) or sides < 3:
raise ValueError(
'''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
elif length < 0:
raise ValueError(
'''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F"Rectangle: {area_rectangle(10, 20) = }")
print(F"Square: {area_square(10) = }")
print(F"Triangle: {area_triangle(10, 10) = }")
print(F"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(F"Parallelogram: {area_parallelogram(10, 20) = }")
print(F"Rhombus: {area_rhombus(10, 20) = }")
print(F"Trapezium: {area_trapezium(10, 20, 30) = }")
print(F"Circle: {area_circle(20) = }")
print(F"Ellipse: {area_ellipse(10, 20) = }")
print('''\nSurface Areas of various geometric shapes: \n''')
print(F"Cube: {surface_area_cube(20) = }")
print(F"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(F"Sphere: {surface_area_sphere(20) = }")
print(F"Hemisphere: {surface_area_hemisphere(20) = }")
print(F"Cone: {surface_area_cone(10, 20) = }")
print(F"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(F"Cylinder: {surface_area_cylinder(10, 20) = }")
print(F"Torus: {surface_area_torus(20, 10) = }")
print(F"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(F"Square: {area_reg_polygon(4, 10) = }")
print(F"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 19 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase_ = {
"""configuration_tapas""": ["""TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TapasConfig"""],
"""tokenization_tapas""": ["""TapasTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TapasForMaskedLM""",
"""TapasForQuestionAnswering""",
"""TapasForSequenceClassification""",
"""TapasModel""",
"""TapasPreTrainedModel""",
"""load_tf_weights_in_tapas""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFTapasForMaskedLM""",
"""TFTapasForQuestionAnswering""",
"""TFTapasForSequenceClassification""",
"""TFTapasModel""",
"""TFTapasPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 58 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A__ ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
__A : Optional[int] = ShapEImgaImgPipeline
__A : Tuple = ['''image''']
__A : Any = ['''image''']
__A : Optional[Any] = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
__A : Dict = False
@property
def __lowercase ( self) -> Any:
'''simple docstring'''
return 32
@property
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
return 32
@property
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
return 8
@property
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0)
a__ : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
a__ : Dict = CLIPVisionModel(lowercase)
return model
@property
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
a__ : str = CLIPImageProcessor(
crop_size=224 , do_center_crop=lowercase , do_normalize=lowercase , do_resize=lowercase , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
@property
def __lowercase ( self) -> str:
'''simple docstring'''
torch.manual_seed(0)
a__ : str = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
a__ : Any = PriorTransformer(**lowercase)
return model
@property
def __lowercase ( self) -> Any:
'''simple docstring'''
torch.manual_seed(0)
a__ : List[Any] = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
a__ : List[str] = ShapERenderer(**lowercase)
return model
def __lowercase ( self) -> str:
'''simple docstring'''
a__ : Dict = self.dummy_prior
a__ : List[str] = self.dummy_image_encoder
a__ : int = self.dummy_image_processor
a__ : str = self.dummy_renderer
a__ : Optional[int] = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=lowercase , clip_sample=lowercase , clip_sample_range=1.0 , )
a__ : List[Any] = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __lowercase ( self , lowercase , lowercase=0) -> List[str]:
'''simple docstring'''
a__ : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase)).to(lowercase)
if str(lowercase).startswith('mps'):
a__ : List[str] = torch.manual_seed(lowercase)
else:
a__ : str = torch.Generator(device=lowercase).manual_seed(lowercase)
a__ : Tuple = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def __lowercase ( self) -> Any:
'''simple docstring'''
a__ : int = 'cpu'
a__ : List[str] = self.get_dummy_components()
a__ : Dict = self.pipeline_class(**lowercase)
a__ : Optional[int] = pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
a__ : Tuple = pipe(**self.get_dummy_inputs(lowercase))
a__ : Any = output.images[0]
a__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
a__ : List[str] = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __lowercase ( self) -> Any:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : str = torch_device == 'cpu'
a__ : Tuple = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowercase , relax_max_difference=lowercase , )
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : List[str] = self.get_dummy_components()
a__ : str = self.pipeline_class(**lowercase)
a__ : List[str] = pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
a__ : Optional[Any] = 1
a__ : List[str] = 2
a__ : Optional[Any] = self.get_dummy_inputs(lowercase)
for key in inputs.keys():
if key in self.batch_params:
a__ : Any = batch_size * [inputs[key]]
a__ : int = pipe(**lowercase , num_images_per_prompt=lowercase)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png')
a__ : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy')
a__ : List[str] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img')
a__ : Tuple = pipe.to(lowercase)
pipe.set_progress_bar_config(disable=lowercase)
a__ : List[Any] = torch.Generator(device=lowercase).manual_seed(0)
a__ : Optional[int] = pipe(
lowercase , generator=lowercase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowercase , lowercase)
| 99 | 0 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
snake_case__ : List[str] = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' ,safety_checker=__lowercase ,cache_dir=__lowercase )
snake_case__ : Union[str, Any] = [t[-1] for t in os.walk(os.path.join(__lowercase ,os.listdir(__lowercase )[0] ,'''snapshots''' ) )]
snake_case__ : Optional[Any] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :str ):
snake_case__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' ,safety_checker=__lowercase )
snake_case__ : List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : Optional[Any] = jax.random.PRNGKey(0 )
snake_case__ : Union[str, Any] = 4
snake_case__ : Union[str, Any] = jax.device_count()
snake_case__ : Tuple = num_samples * [prompt]
snake_case__ : Optional[int] = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
snake_case__ : Optional[int] = replicate(__lowercase )
snake_case__ : Optional[int] = jax.random.split(__lowercase ,__lowercase )
snake_case__ : Union[str, Any] = shard(__lowercase )
snake_case__ : List[str] = pipeline(__lowercase ,__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 4.151_4745 ) < 1e-3
assert np.abs(np.abs(__lowercase ,dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
snake_case__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__lowercase ) == num_samples
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''flax''' ,safety_checker=__lowercase )
snake_case__ : Optional[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : int = jax.random.PRNGKey(0 )
snake_case__ : Tuple = 5_0
snake_case__ : Union[str, Any] = jax.device_count()
snake_case__ : List[str] = num_samples * [prompt]
snake_case__ : Union[str, Any] = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
snake_case__ : Union[str, Any] = replicate(__lowercase )
snake_case__ : Optional[int] = jax.random.split(__lowercase ,__lowercase )
snake_case__ : List[str] = shard(__lowercase )
snake_case__ : List[str] = pipeline(__lowercase ,__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0565_2401) ) < 1e-3
assert np.abs((np.abs(__lowercase ,dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,safety_checker=__lowercase )
snake_case__ : Any = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : Any = jax.random.PRNGKey(0 )
snake_case__ : Optional[Any] = 5_0
snake_case__ : Dict = jax.device_count()
snake_case__ : Dict = num_samples * [prompt]
snake_case__ : Union[str, Any] = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
snake_case__ : str = replicate(__lowercase )
snake_case__ : Any = jax.random.split(__lowercase ,__lowercase )
snake_case__ : str = shard(__lowercase )
snake_case__ : Union[str, Any] = pipeline(__lowercase ,__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0400_3906) ) < 1e-3
assert np.abs((np.abs(__lowercase ,dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa )
snake_case__ : List[str] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : Dict = jax.random.PRNGKey(0 )
snake_case__ : Dict = 5_0
snake_case__ : Tuple = jax.device_count()
snake_case__ : Dict = num_samples * [prompt]
snake_case__ : Any = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
snake_case__ : List[str] = replicate(__lowercase )
snake_case__ : List[str] = jax.random.split(__lowercase ,__lowercase )
snake_case__ : str = shard(__lowercase )
snake_case__ : List[Any] = pipeline(__lowercase ,__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0400_3906) ) < 1e-3
assert np.abs((np.abs(__lowercase ,dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __lowerCamelCase ( self :List[str] ):
snake_case__ : Optional[Any] = FlaxDDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule='''scaled_linear''' ,set_alpha_to_one=__lowercase ,steps_offset=1 ,)
snake_case__ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,scheduler=__lowercase ,safety_checker=__lowercase ,)
snake_case__ : Dict = scheduler.create_state()
snake_case__ : Any = scheduler_state
snake_case__ : Tuple = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : int = jax.random.PRNGKey(0 )
snake_case__ : int = 5_0
snake_case__ : int = jax.device_count()
snake_case__ : Tuple = num_samples * [prompt]
snake_case__ : Optional[int] = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
snake_case__ : Union[str, Any] = replicate(__lowercase )
snake_case__ : Tuple = jax.random.split(__lowercase ,__lowercase )
snake_case__ : Optional[Any] = shard(__lowercase )
snake_case__ : List[str] = pipeline(__lowercase ,__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0_4504_3945) ) < 1e-3
assert np.abs((np.abs(__lowercase ,dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : int = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : Tuple = jax.device_count()
snake_case__ : Dict = num_samples * [prompt]
snake_case__ : List[str] = jax.random.split(jax.random.PRNGKey(0 ) ,__lowercase )
snake_case__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,safety_checker=__lowercase ,)
snake_case__ : Union[str, Any] = replicate(__lowercase )
snake_case__ : int = pipeline.prepare_inputs(__lowercase )
snake_case__ : Optional[int] = shard(__lowercase )
snake_case__ : int = pipeline(__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
snake_case__ : Any = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
snake_case__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,safety_checker=__lowercase ,use_memory_efficient_attention=__lowercase ,)
snake_case__ : str = replicate(__lowercase )
snake_case__ : List[Any] = pipeline.prepare_inputs(__lowercase )
snake_case__ : Tuple = shard(__lowercase )
snake_case__ : List[Any] = pipeline(__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
snake_case__ : str = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
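# The device-parallel pattern repeated in every test above, sketched once
# (assuming the flax API shape ``pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(...)``):
#
#     prng_seed = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
#     prompt_ids = pipeline.prepare_inputs([prompt] * jax.device_count())
#     params = replicate(params)        # one copy of the weights per device
#     prompt_ids = shard(prompt_ids)    # leading axis -> (devices, batch_per_device, ...)
#     images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images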
| 369 |
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> bool:
"""simple docstring"""
snake_case__ : Optional[int] = len(__lowerCAmelCase ) + 1
snake_case__ : Tuple = len(__lowerCAmelCase ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
snake_case__ : str = [[0 for i in range(__lowerCAmelCase )] for j in range(__lowerCAmelCase )]
    # an empty input string matches an empty pattern
snake_case__ : int = 1
    # an empty pattern never matches a non-empty string
for i in range(1 , __lowerCAmelCase ):
snake_case__ : Dict = 0
    # an empty string can still match a pattern in which every literal is
    # immediately followed by '*' (e.g. "a*b*"); fill row 0 accordingly
for j in range(1 , __lowerCAmelCase ):
snake_case__ : str = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , __lowerCAmelCase ):
for j in range(1 , __lowerCAmelCase ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
snake_case__ : Dict = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
snake_case__ : Union[str, Any] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
snake_case__ : List[str] = dp[i - 1][j]
else:
snake_case__ : Union[str, Any] = 0
else:
snake_case__ : Tuple = 0
return bool(dp[-1][-1] )
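# A few reference cases the DP above handles (classic regex-matching examples):
#     match_pattern("aa", "a")      -> False  (pattern must cover the whole string)
#     match_pattern("aa", "a*")     -> True   ('*' repeats the preceding 'a')
#     match_pattern("ab", ".*")     -> True   ('.*' matches any string)
#     match_pattern("mississippi", "mis*is*p*.") -> False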
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
A__ = '''aab'''
A__ = '''c*a*b'''
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f"""{input_string} matches the given pattern {pattern}""")
else:
print(f"""{input_string} does not match with the given pattern {pattern}""")
| 44 | 0 |
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.inta,
'tensor(uint8)': np.uinta,
'tensor(int16)': np.intaa,
'tensor(uint16)': np.uintaa,
'tensor(int32)': np.intaa,
'tensor(uint32)': np.uintaa,
'tensor(int64)': np.intaa,
'tensor(uint64)': np.uintaa,
'tensor(float16)': np.floataa,
'tensor(float)': np.floataa,
'tensor(double)': np.floataa,
}
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A=None , **__A ) -> Optional[int]:
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
lowerCAmelCase_ :Dict = model
lowerCAmelCase_ :List[Any] = kwargs.get("""model_save_dir""" , __A )
lowerCAmelCase_ :Dict = kwargs.get("""latest_model_name""" , __A )
def __call__( self , **__A ) -> Any:
lowerCAmelCase_ :List[Any] = {k: np.array(__A ) for k, v in kwargs.items()}
return self.model.run(__A , __A )
@staticmethod
def __lowerCAmelCase ( __A , __A=None , __A=None ) -> str:
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
lowerCAmelCase_ :Optional[Any] = """CPUExecutionProvider"""
return ort.InferenceSession(__A , providers=[provider] , sess_options=__A )
def __lowerCAmelCase ( self , __A , __A = None , **__A ) -> Union[str, Any]:
lowerCAmelCase_ :Union[str, Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
lowerCAmelCase_ :Any = self.model_save_dir.joinpath(self.latest_model_name )
lowerCAmelCase_ :str = Path(__A ).joinpath(__A )
try:
shutil.copyfile(__A , __A )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
lowerCAmelCase_ :Union[str, Any] = self.model_save_dir.joinpath(__A )
if src_path.exists():
lowerCAmelCase_ :List[str] = Path(__A ).joinpath(__A )
try:
shutil.copyfile(__A , __A )
except shutil.SameFileError:
pass
def __lowerCAmelCase ( self , __A , **__A , ) -> Union[str, Any]:
if os.path.isfile(__A ):
logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(__A , exist_ok=__A )
# saving model weights/files
self._save_pretrained(__A , **__A )
@classmethod
def __lowerCAmelCase ( cls , __A , __A = None , __A = None , __A = False , __A = None , __A = None , __A = None , __A = None , **__A , ) -> int:
lowerCAmelCase_ :List[Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(__A ):
lowerCAmelCase_ :Optional[int] = OnnxRuntimeModel.load_model(
os.path.join(__A , __A ) , provider=__A , sess_options=__A )
lowerCAmelCase_ :int = Path(__A )
# load model from hub
else:
# download model
lowerCAmelCase_ :Optional[int] = hf_hub_download(
repo_id=__A , filename=__A , use_auth_token=__A , revision=__A , cache_dir=__A , force_download=__A , )
lowerCAmelCase_ :Optional[int] = Path(__A ).parent
lowerCAmelCase_ :Optional[Any] = Path(__A ).name
lowerCAmelCase_ :str = OnnxRuntimeModel.load_model(__A , provider=__A , sess_options=__A )
return cls(model=__A , **__A )
@classmethod
def __lowerCAmelCase ( cls , __A , __A = True , __A = None , __A = None , **__A , ) -> Union[str, Any]:
lowerCAmelCase_ :str = None
if len(str(__A ).split("""@""" ) ) == 2:
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = model_id.split("""@""" )
return cls._from_pretrained(
model_id=__A , revision=__A , cache_dir=__A , force_download=__A , use_auth_token=__A , **__A , )
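# Sketch of typical use, assuming an ONNX export (model.onnx, plus external
# weights for >2GB models) is available under ``model_id``:
#
#     model = OnnxRuntimeModel.from_pretrained(model_id, provider="CPUExecutionProvider")
#     out = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))  # kwargs become np arrays
#     model.save_pretrained("./onnx_copy")   # copies model.onnx and any external weights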
| 84 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def _snake_case ( ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = 1_0
lowerCAmelCase_ :Optional[int] = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
lowerCAmelCase_ :int = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0,
"""id""": list(range(lowercase__ ) ),
} , features=lowercase__ , )
return dataset
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple , lowercase__ : int ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=lowercase__ )
return filename
# FILE_CONTENT + files
__UpperCAmelCase = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : str ) -> str:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
lowerCAmelCase_ :List[Any] = FILE_CONTENT
with open(lowercase__ , """w""" ) as f:
f.write(lowercase__ )
return filename
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[Any] ) -> Tuple:
'''simple docstring'''
import bza
lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" )
with bza.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[Any] ) -> Dict:
'''simple docstring'''
import gzip
lowerCAmelCase_ :int = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" )
with gzip.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict ) -> Optional[int]:
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lza.frame
lowerCAmelCase_ :List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
lowerCAmelCase_ :int = bytes(lowercase__ , """utf-8""" )
with lza.frame.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict , lowercase__ : Optional[int] ) -> Any:
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with pyazr.SevenZipFile(lowercase__ , """w""" ) as archive:
archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
import tarfile
lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(lowercase__ , """w""" ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> str:
'''simple docstring'''
import lzma
lowerCAmelCase_ :Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
lowerCAmelCase_ :Optional[Any] = bytes(lowercase__ , """utf-8""" )
with lzma.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : List[Any] ) -> Any:
'''simple docstring'''
import zipfile
lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : int ) -> Tuple:
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
lowerCAmelCase_ :Any = bytes(lowercase__ , """utf-8""" )
with zstd.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] ) -> str:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
lowerCAmelCase_ :Any = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(lowercase__ , """w""" ) as f:
f.write(lowercase__ )
return filename
__UpperCAmelCase = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
__UpperCAmelCase = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
__UpperCAmelCase = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
__UpperCAmelCase = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
__UpperCAmelCase = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="""session""" )
def _snake_case ( ) -> Union[str, Any]:
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : int ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Tuple = datasets.Dataset.from_dict(lowercase__ )
lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : int ) -> str:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con:
lowerCAmelCase_ :Union[str, Any] = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> int:
'''simple docstring'''
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(lowercase__ , """w""" , newline="""""" ) as f:
lowerCAmelCase_ :Optional[int] = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(lowercase__ , """w""" , newline="""""" ) as f:
lowerCAmelCase_ :Dict = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : str , lowercase__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
import bza
lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(lowercase__ , """rb""" ) as f:
lowerCAmelCase_ :Union[str, Any] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Any ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(lowercase__ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : str ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
lowerCAmelCase_ :Optional[Any] = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(lowercase__ , """wb""" ) as f:
lowerCAmelCase_ :Optional[int] = pq.ParquetWriter(lowercase__ , schema=lowercase__ )
lowerCAmelCase_ :List[str] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ )
writer.write_table(lowercase__ )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
lowerCAmelCase_ :Union[str, Any] = {"""data""": DATA}
with open(lowercase__ , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : str ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
lowerCAmelCase_ :Optional[Any] = {"""data""": DATA_DICT_OF_LISTS}
with open(lowercase__ , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : int , lowercase__ : Dict ) -> Optional[int]:
'''simple docstring'''
import gzip
lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(lowercase__ , """rb""" ) as orig_file:
with gzip.open(lowercase__ , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] , lowercase__ : List[Any] ) -> Any:
'''simple docstring'''
import gzip
lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(lowercase__ , """rb""" ) as orig_file:
with gzip.open(lowercase__ , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : List[Any] ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[str] ) -> int:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : List[str] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(lowercase__ , """w""" ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict , lowercase__ : str , lowercase__ : List[str] , lowercase__ : int ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(lowercase__ , """w""" ) as f:
f.add(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :str = ["""0""", """1""", """2""", """3"""]
lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(lowercase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] ) -> Dict:
    # NOTE: fixture and variable names in this block are reconstructions; the original identifiers were lost.
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text_path_with_unsupported_extension(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path


@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path


@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
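

# Usage sketch (hypothetical tests; pytest injects the session fixtures above
# by parameter name, and the assertions rely on the contents defined there):
def test_zip_text_path_contains_two_members(zip_text_path):
    import zipfile

    with zipfile.ZipFile(zip_text_path) as zf:
        assert len(zf.namelist()) == 2


def test_hidden_entries_exist_on_disk(data_dir_with_hidden_files):
    assert (data_dir_with_hidden_files / "subdir" / ".test.txt").exists()
    assert (data_dir_with_hidden_files / ".subdir" / "train.txt").exists()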
| 84 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
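

# Usage sketch (requires Hub access; the round-trip shown is the expected output):
#
#     from transformers import BartTokenizerFast
#
#     tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
#     ids = tok("Hello world")["input_ids"]
#     tok.decode(ids)  # '<s>Hello world</s>' -- bos/eos added by the post-processor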
| 72 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
    import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
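

# Sketch of how a FlaxModelTesterMixin-style test would drive the two hooks
# above (hypothetical; the mixin's real internals may differ):
#
#     init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
#     model = self.model_class(**init_dict)
#     params = model.init(inputs_dict["prng_key"], inputs_dict["sample"])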
| 72 | 1 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MPNetForMaskedLM,
        MPNetForMultipleChoice,
        MPNetForQuestionAnswering,
        MPNetForSequenceClassification,
        MPNetForTokenClassification,
        MPNetModel,
    )
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)


@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 5 |
# flake8: noqa
# Lint as: python3
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
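
# Usage sketch (hypothetical module path; assumes this file is `datasets/utils/__init__.py`):
#
#     from datasets.utils import Version, disable_progress_bar
#
#     Version("2.0.0").major   # 2 -- the string is parsed into major/minor/patch
#     disable_progress_bar()   # silence progress bars process-wide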
| 5 | 1 |
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the self-avoiding paths from (row, col) to the bottom-right cell of a
    0/1 grid, moving up/down/left/right; a 1 marks a blocked cell."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
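

# Usage sketch (illustrative helper, not part of the original module): counts
# self-avoiding paths on a tiny grid, where 0 = open and 1 = blocked.
def demo_grid_paths() -> None:
    open_grid = [[0, 0], [0, 0]]
    assert depth_first_search(open_grid, 0, 0, set()) == 2  # right-down, down-right

    blocked_grid = [[0, 1], [0, 0]]
    assert depth_first_search(blocked_grid, 0, 0, set()) == 1  # only down-then-right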
| 4 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
| 4 | 1 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)


def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    "Simple model to do y=mx+b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)

            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))

    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 12 |
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Find the largest-magnitude eigenvalue of `input_matrix` (and an associated
    eigenvector) via power iteration, starting from the initial guess `vector`."""
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
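

# Quick illustration (hypothetical helper, not from the original file): on a
# diagonal matrix the dominant eigenvalue is simply the largest diagonal entry,
# so the result of power iteration is easy to check by hand.
def demo_power_iteration() -> None:
    m = np.array([[3.0, 0.0], [0.0, 1.0]])
    v0 = np.array([1.0, 1.0])
    eigen_value, eigen_vector = power_iteration(m, v0)
    assert abs(eigen_value - 3.0) <= 1e-6
    assert np.linalg.norm(np.abs(eigen_vector) - np.array([1.0, 0.0])) <= 1e-4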
| 15 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
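

# Launch note (illustrative): these collectives only do real work when the
# script runs under multiple processes, e.g. assuming it is saved as test_ops.py:
#
#     accelerate launch --num_processes 2 test_ops.py
#
# `_mp_fn` above is the entry point picked up by torch_xla's xla_spawn on TPUs.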
| 163 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
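

# The cases above pin down the contract: shard indices are split into at most
# `max_num_jobs` contiguous ranges, with earlier jobs absorbing the remainder.
# A minimal sketch consistent with those cases (the real implementation in
# `datasets.utils.sharding` may differ in detail):
def _distribute_shards_sketch(num_shards: int, max_num_jobs: int) -> list:
    shard_ranges = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < num_shards % max_num_jobs)
        if num_shards_to_add == 0:
            break
        start = shard_ranges[-1].stop if shard_ranges else 0
        shard_ranges.append(range(start, start + num_shards_to_add))
    return shard_ranges


def test_distribute_shards_sketch_matches_contract():
    assert _distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
    assert _distribute_shards_sketch(3, 10) == [range(0, 1), range(1, 2), range(2, 3)]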
| 163 | 1 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end timestamps and duration (in minutes) from a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
| 9 |
"""simple docstring"""
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string representation, e.g. 5 -> "0b101"."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
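

# Usage sketch (illustrative helper): the function mirrors Python's built-in
# bin() for integers, including the sign handling.
def demo_decimal_to_binary() -> None:
    assert decimal_to_binary(0) == "0b0"
    assert decimal_to_binary(37) == bin(37)  # '0b100101'
    assert decimal_to_binary(-37) == "-0b100101"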
| 40 | 0 |
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
    import torch


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""AI-Sweden/gpt-sw3-126m""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-350m""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-1.6b""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-6.7b""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-20b""": 2_0_4_8,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts; the original set lists
        # a dozen distinct Unicode space characters (lost in extraction, kept as-is)
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Preprocess text the same way the tokenizer was trained: strip
        non-printing characters, normalize whitespace, then NFC-normalize."""
        # Remove non-printing characters
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to disable the default clean-up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string, keeping special tokens intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False

        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encode a text or batch of texts to token ids using the raw SP tokenizer.
        Reduced functionality compared to `__call__`, but often much faster."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decode token ids directly with the raw SP tokenizer."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation prompt."""
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )

        return self.encode(text=prompt)
| 361 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __a ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE_ = 'ChineseCLIPImageProcessor'
SCREAMING_SNAKE_CASE_ = ('BertTokenizer', 'BertTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            # attach the processed images to the tokenizer output so one mapping holds both modalities
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
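A minimal usage sketch for the processor above (checkpoint id and image path are illustrative, not taken from this file):

from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
inputs = processor(text=["一只猫"], images=Image.open("cat.png"), return_tensors="pt")  # "一只猫" = "a cat"
# `inputs` holds input_ids/attention_mask from the tokenizer plus pixel_values
# from the image processor, merged exactly as in `__call__` above.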
| 157 | 0 |
"""simple docstring"""
from PIL import Image
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level))
def contrast(lowerCamelCase ) -> int:
return int(1_2_8 + factor * (c - 1_2_8) )
return img.point(lowerCamelCase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
lowerCAmelCase__ : Any = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 98 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = """<""" if sys.byteorder == """little""" else """>"""
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class Image:
    """simple docstring"""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
    _type: str = field(default="Image" , init=False , repr=False )

    def __call__( self ):
        '''simple docstring'''
        return self.pa_type
    def encode_example( self , value ) -> dict:
        '''simple docstring'''
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
        if isinstance(value , list ):
            value = np.array(value )

        if isinstance(value , str ):
            return {"path": value, "bytes": None}
        elif isinstance(value , bytes ):
            return {"path": None, "bytes": value}
        elif isinstance(value , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value )
        elif isinstance(value , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value )
        elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('''path''' )}
        elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
        else:
            raise ValueError(
                F'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
    def decode_example( self , value , token_per_repo_id=None ) -> "PIL.Image.Image":
        '''simple docstring'''
        if not self.decode:
            raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path , bytes_ = value['''path'''], value['''bytes''']
        if bytes_ is None:
            if path is None:
                raise ValueError(F'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
            else:
                if is_local_path(path ):
                    image = PIL.Image.open(path )
                else:
                    source_url = path.split('''::''' )[-1]
                    try:
                        repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )['''repo_id''']
                        use_auth_token = token_per_repo_id.get(repo_id )
                    except ValueError:
                        use_auth_token = None
                    with xopen(path , '''rb''' , use_auth_token=use_auth_token ) as f:
                        bytes_ = BytesIO(f.read() )
                    image = PIL.Image.open(bytes_ )
        else:
            image = PIL.Image.open(BytesIO(bytes_ ) )
        image.load()  # to avoid "Too many open files" errors
        return image
def _snake_case ( self )->Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->pa.StructArray:
'''simple docstring'''
if pa.types.is_string(storage.type ):
A_ : Dict = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.binary() )
A_ : List[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
A_ : Dict = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.string() )
A_ : List[str] = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
A_ : Tuple = storage.field('''bytes''' )
else:
A_ : Optional[int] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
A_ : Optional[Any] = storage.field('''path''' )
else:
A_ : Optional[int] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.string() )
A_ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
A_ : Optional[Any] = pa.array(
[encode_np_array(np.array(_SCREAMING_SNAKE_CASE ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
A_ : str = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.string() )
A_ : List[Any] = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE , self.pa_type )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->pa.StructArray:
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(_SCREAMING_SNAKE_CASE ):
with xopen(_SCREAMING_SNAKE_CASE , '''rb''' ) as f:
A_ : Any = f.read()
return bytes_
A_ : Dict = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
A_ : List[Any] = pa.array(
[os.path.basename(_SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
A_ : str = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE , self.pa_type )
def list_image_compression_formats() -> list:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image" ) -> bytes:
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
    image.save(buffer , format=format )
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image" ) -> dict:
    if hasattr(image , '''filename''' ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array(array: np.ndarray ) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('''|u1''' )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
        if dtype is not dest_dtype:
            warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )

    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def objects_to_list_of_image_dicts(objs ):
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
    if objs:
        _ , obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
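A short sketch of this feature in action (toy in-memory data; no files involved):

import numpy as np
from datasets import Dataset, Features, Image

features = Features({"img": Image()})
ds = Dataset.from_dict({"img": [np.zeros((4, 4, 3), dtype=np.uint8)]}, features=features)
pil_image = ds[0]["img"]  # decoded back to a PIL.Image.Image via decode_example above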
| 186 | 0 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] ) -> int:
"""simple docstring"""
print('Loading config file...' )
def flatten_yaml_as_dict(UpperCamelCase__ : Dict , UpperCamelCase__ : Any="" , UpperCamelCase__ : Any="." ):
__lowerCamelCase = []
for k, v in d.items():
__lowerCamelCase = parent_key + sep + k if parent_key else k
if isinstance(UpperCamelCase__ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(UpperCamelCase__ , UpperCamelCase__ , sep=UpperCamelCase__ ).items() )
else:
items.append((new_key, v) )
return dict(UpperCamelCase__ )
__lowerCamelCase = argparse.Namespace()
with open(UpperCamelCase__ , 'r' ) as yaml_file:
try:
__lowerCamelCase = yaml.load(UpperCamelCase__ , Loader=yaml.FullLoader )
__lowerCamelCase = flatten_yaml_as_dict(UpperCamelCase__ )
for k, v in flat_cfg.items():
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
except yaml.YAMLError as exc:
logger.error('Error while loading config file: {}. Error message: {}'.format(UpperCamelCase__ , str(UpperCamelCase__ ) ) )
return config
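# Note: flatten_yaml_as_dict above turns a nested mapping such as
# {"model": {"classification": {"name": "mobilevit_v2"}}} into the flat key
# "model.classification.name", which is exactly what the dotted getattr lookups
# in get_mobilevitva_config below rely on.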
def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : int ) -> Dict:
"""simple docstring"""
__lowerCamelCase = MobileViTVaConfig()
__lowerCamelCase = False
# dataset
if task_name.startswith('imagenet1k_' ):
__lowerCamelCase = 1000
if int(task_name.strip().split('_' )[-1] ) == 384:
__lowerCamelCase = 384
else:
__lowerCamelCase = 256
__lowerCamelCase = 'imagenet-1k-id2label.json'
elif task_name.startswith('imagenet21k_to_1k_' ):
__lowerCamelCase = 2_1000
if int(task_name.strip().split('_' )[-1] ) == 384:
__lowerCamelCase = 384
else:
__lowerCamelCase = 256
__lowerCamelCase = 'imagenet-22k-id2label.json'
elif task_name.startswith('ade20k_' ):
__lowerCamelCase = 151
__lowerCamelCase = 512
__lowerCamelCase = 'ade20k-id2label.json'
__lowerCamelCase = True
elif task_name.startswith('voc_' ):
__lowerCamelCase = 21
__lowerCamelCase = 512
__lowerCamelCase = 'pascal-voc-id2label.json'
__lowerCamelCase = True
# orig_config
__lowerCamelCase = load_orig_config_file(UpperCamelCase__ )
assert getattr(UpperCamelCase__ , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model"
__lowerCamelCase = getattr(UpperCamelCase__ , 'model.classification.mitv2.width_multiplier' , 1.0 )
assert (
getattr(UpperCamelCase__ , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
__lowerCamelCase = getattr(UpperCamelCase__ , 'model.classification.activation.name' , 'swish' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
__lowerCamelCase = getattr(UpperCamelCase__ , 'model.segmentation.output_stride' , 16 )
if "_deeplabv3" in task_name:
__lowerCamelCase = getattr(UpperCamelCase__ , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] )
__lowerCamelCase = getattr(UpperCamelCase__ , 'model.segmentation.deeplabv3.aspp_out_channels' , 512 )
__lowerCamelCase = getattr(UpperCamelCase__ , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 )
# id2label
__lowerCamelCase = 'huggingface/label-files'
__lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) )
__lowerCamelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowerCamelCase = dct.pop(UpperCamelCase__ )
__lowerCamelCase = val
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=False ) -> Any:
"""simple docstring"""
if base_model:
__lowerCamelCase = ''
else:
__lowerCamelCase = 'mobilevitv2.'
__lowerCamelCase = []
for k in state_dict.keys():
if k[:8] == "encoder.":
__lowerCamelCase = k[8:]
else:
__lowerCamelCase = k
if ".block." in k:
__lowerCamelCase = k_new.replace('.block.' , '.' )
if ".conv." in k:
__lowerCamelCase = k_new.replace('.conv.' , '.convolution.' )
if ".norm." in k:
__lowerCamelCase = k_new.replace('.norm.' , '.normalization.' )
if "conv_1." in k:
__lowerCamelCase = k_new.replace('conv_1.' , F"""{model_prefix}conv_stem.""" )
for i in [1, 2]:
if F"""layer_{i}.""" in k:
__lowerCamelCase = k_new.replace(F"""layer_{i}.""" , F"""{model_prefix}encoder.layer.{i-1}.layer.""" )
if ".exp_1x1." in k:
__lowerCamelCase = k_new.replace('.exp_1x1.' , '.expand_1x1.' )
if ".red_1x1." in k:
__lowerCamelCase = k_new.replace('.red_1x1.' , '.reduce_1x1.' )
for i in [3, 4, 5]:
if F"""layer_{i}.0.""" in k:
__lowerCamelCase = k_new.replace(F"""layer_{i}.0.""" , F"""{model_prefix}encoder.layer.{i-1}.downsampling_layer.""" )
if F"""layer_{i}.1.local_rep.0.""" in k:
__lowerCamelCase = k_new.replace(F"""layer_{i}.1.local_rep.0.""" , F"""{model_prefix}encoder.layer.{i-1}.conv_kxk.""" )
if F"""layer_{i}.1.local_rep.1.""" in k:
__lowerCamelCase = k_new.replace(F"""layer_{i}.1.local_rep.1.""" , F"""{model_prefix}encoder.layer.{i-1}.conv_1x1.""" )
for i in [3, 4, 5]:
if i == 3:
__lowerCamelCase = [0, 1]
elif i == 4:
__lowerCamelCase = [0, 1, 2, 3]
elif i == 5:
__lowerCamelCase = [0, 1, 2]
for j in j_in:
if F"""layer_{i}.1.global_rep.{j}.""" in k:
__lowerCamelCase = k_new.replace(
F"""layer_{i}.1.global_rep.{j}.""" , F"""{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.""" )
if F"""layer_{i}.1.global_rep.{j+1}.""" in k:
__lowerCamelCase = k_new.replace(
F"""layer_{i}.1.global_rep.{j+1}.""" , F"""{model_prefix}encoder.layer.{i-1}.layernorm.""" )
if F"""layer_{i}.1.conv_proj.""" in k:
__lowerCamelCase = k_new.replace(F"""layer_{i}.1.conv_proj.""" , F"""{model_prefix}encoder.layer.{i-1}.conv_projection.""" )
if "pre_norm_attn.0." in k:
__lowerCamelCase = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' )
if "pre_norm_attn.1." in k:
__lowerCamelCase = k_new.replace('pre_norm_attn.1.' , 'attention.' )
if "pre_norm_ffn.0." in k:
__lowerCamelCase = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' )
if "pre_norm_ffn.1." in k:
__lowerCamelCase = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' )
if "pre_norm_ffn.3." in k:
__lowerCamelCase = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' )
if "classifier.1." in k:
__lowerCamelCase = k_new.replace('classifier.1.' , 'classifier.' )
if "seg_head." in k:
__lowerCamelCase = k_new.replace('seg_head.' , 'segmentation_head.' )
if ".aspp_layer." in k:
__lowerCamelCase = k_new.replace('.aspp_layer.' , '.' )
if ".aspp_pool." in k:
__lowerCamelCase = k_new.replace('.aspp_pool.' , '.' )
rename_keys.append((k, k_new) )
return rename_keys
def lowerCamelCase_ ( UpperCamelCase__ : int ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = []
for k in state_dict.keys():
if k.startswith('seg_head.aux_head.' ):
keys_to_ignore.append(UpperCamelCase__ )
for k in keys_to_ignore:
state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
__lowerCamelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> str:
"""simple docstring"""
__lowerCamelCase = get_mobilevitva_config(UpperCamelCase__ , UpperCamelCase__ )
# load original state_dict
__lowerCamelCase = torch.load(UpperCamelCase__ , map_location='cpu' )
# load huggingface model
if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
__lowerCamelCase = MobileViTVaForSemanticSegmentation(UpperCamelCase__ ).eval()
__lowerCamelCase = False
else:
__lowerCamelCase = MobileViTVaForImageClassification(UpperCamelCase__ ).eval()
__lowerCamelCase = False
# remove and rename some keys of load the original model
__lowerCamelCase = checkpoint
remove_unused_keys(UpperCamelCase__ )
__lowerCamelCase = create_rename_keys(UpperCamelCase__ , base_model=UpperCamelCase__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# load modified state_dict
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
__lowerCamelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
__lowerCamelCase = image_processor(images=prepare_img() , return_tensors='pt' )
__lowerCamelCase = model(**UpperCamelCase__ )
# verify classification model
if task_name.startswith('imagenet' ):
__lowerCamelCase = outputs.logits
__lowerCamelCase = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
__lowerCamelCase = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] )
assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1E-4 )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(F"""Saving model {task_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 348 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''mask2former'''
snake_case_ = ['''swin''']
snake_case_ = {'''hidden_size''': '''hidden_dim'''}
def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 1_024 , lowerCamelCase__ = "relu" , lowerCamelCase__ = 6 , lowerCamelCase__ = 10 , lowerCamelCase__ = 8 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = 2_048 , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = 4 , lowerCamelCase__ = 255 , lowerCamelCase__ = 100 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 2.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 12_544 , lowerCamelCase__ = 3.0 , lowerCamelCase__ = 0.75 , lowerCamelCase__ = 0.02 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = True , lowerCamelCase__ = [4, 8, 16, 32] , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Tuple:
'''simple docstring'''
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__lowerCamelCase = CONFIG_MAPPING['swin'](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowerCamelCase__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase = backbone_config.pop('model_type' )
__lowerCamelCase = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase = config_class.from_dict(lowerCamelCase__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
f"""Supported model types: {','.join(self.backbones_supported )}""" )
__lowerCamelCase = backbone_config
__lowerCamelCase = feature_size
__lowerCamelCase = mask_feature_size
__lowerCamelCase = hidden_dim
__lowerCamelCase = encoder_feedforward_dim
__lowerCamelCase = activation_function
__lowerCamelCase = encoder_layers
__lowerCamelCase = decoder_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = dropout
__lowerCamelCase = dim_feedforward
__lowerCamelCase = pre_norm
__lowerCamelCase = enforce_input_projection
__lowerCamelCase = common_stride
__lowerCamelCase = ignore_value
__lowerCamelCase = num_queries
__lowerCamelCase = no_object_weight
__lowerCamelCase = class_weight
__lowerCamelCase = mask_weight
__lowerCamelCase = dice_weight
__lowerCamelCase = train_num_points
__lowerCamelCase = oversample_ratio
__lowerCamelCase = importance_sample_ratio
__lowerCamelCase = init_std
__lowerCamelCase = init_xavier_std
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = feature_strides
__lowerCamelCase = output_auxiliary_logits
__lowerCamelCase = decoder_layers
super().__init__(**lowerCamelCase__ )
@classmethod
def lowercase_ ( cls , lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
return cls(
backbone_config=lowerCamelCase__ , **lowerCamelCase__ , )
def lowercase_ ( self ) -> Dict[str, any]:
'''simple docstring'''
__lowerCamelCase = copy.deepcopy(self.__dict__ )
__lowerCamelCase = self.backbone_config.to_dict()
__lowerCamelCase = self.__class__.model_type
return output
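A minimal usage sketch (the class above is `Mask2FormerConfig` upstream in transformers; the model class name is assumed from the same library):

from transformers import Mask2FormerConfig, Mask2FormerModel

config = Mask2FormerConfig()  # falls back to the default Swin backbone, as in __init__ above
model = Mask2FormerModel(config)
print(config.backbone_config.model_type)  # -> "swin"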
| 348 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""

    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        super().setUp()
        vocab_tokens = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file, "w", encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

    @tooslow
    def test_pre_tokenization( self ):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b" )
        texts = '''今天天气真好!'''
        jieba_tokens = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens, jieba_tokens )
        normalized_text = '''今天天气真好!'''
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9_8_0_2, 1_4_9_6_2, 2_0_8_2, 8_3_1, 2_4_4]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ), input_jieba_tokens )
        reconstructed_text = tokenizer.decode(input_jieba_tokens )
        self.assertEqual(reconstructed_text, normalized_text )
| 17 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 336 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    """simple docstring"""

    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("""CoinsDistribResult""", """moves excess""")


def distribute_coins(root ):
    """simple docstring"""
    if root is None:
        return 0

    # Validation
    def count_nodes(node ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(node ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(root ) != count_coins(root ):
        raise ValueError('The nodes number should be same as the number of coins' )

    # Main calculation
    def get_distrib(node ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )

    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
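    # A quick worked example on top of the doctests: a root holding all three
    # coins with two empty leaves needs one move per leaf, i.e. two moves.
    assert distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) == 2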
| 25 |
'''simple docstring'''
def solution() -> int:
    """simple docstring"""
    days_per_month = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1]
    day = 6
    month = 1
    year = 1_9_0_1
    sundays = 0
    while year < 2_0_0_1:
        day += 7
        if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 2_9 and month == 2:
                month += 1
                day = day - 2_9
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 1_2:
            year += 1
            month = 1
        if year < 2_0_0_1 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
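    # Known result for this Project Euler problem (month-starting Sundays
    # during the twentieth century): 171.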
| 25 | 1 |
"""simple docstring"""
from __future__ import annotations
def bucket_sort( my_list ):
    """simple docstring"""
    if len(my_list ) == 0:
        return []
    min_value , max_value = min(my_list ), max(my_list )
    bucket_count = int(max_value - min_value ) + 1
    buckets: list[list] = [[] for _ in range(bucket_count )]
    for i in my_list:
        buckets[int(i - min_value )].append(i )
    return [v for bucket in buckets for v in sorted(bucket )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
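    # Works for floats as well, since the bucket index is int(value - min_value):
    assert bucket_sort([0.4, 1.2, 0.1]) == [0.1, 0.4, 1.2]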
| 167 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 167 | 1 |
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest( unittest.TestCase ):
    def test_transform_and_reverse( self ):
        '''simple docstring'''
        model_id = 'hf-internal-testing/tiny-random-t5'
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id )
        inp = tokenizer('This is me' , return_tensors='pt' )
        model = model.to_bettertransformer()
        self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        output = model.generate(**inp )
        model = model.reverse_bettertransformer()
        self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname )
            self.assertFalse(
                any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
            output_from_pretrained = model_reloaded.generate(**inp )
            self.assertTrue(torch.allclose(output , output_from_pretrained ) )

    def test_error_save_pretrained( self ):
        '''simple docstring'''
        model_id = 'hf-internal-testing/tiny-random-t5'
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id )
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError ):
                model.save_pretrained(tmpdirname )
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname )
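A condensed sketch of the round-trip these tests exercise (tiny test checkpoint; requires the `optimum` package for `to_bettertransformer`):

from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
model = model.to_bettertransformer()        # swap in BetterTransformer modules
model = model.reverse_bettertransformer()   # restore vanilla modules so saving works
model.save_pretrained("t5-roundtrip")       # hypothetical output directory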
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax( nn.Module ):
    def __init__( self , n_token , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False ):
        '''simple docstring'''
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters ) )
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_embed ) ) )
                else:
                    self.out_projs.append(None )
                self.out_layers.append(nn.Linear(d_embed , n_token ) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_emb_i ) ) )
                self.out_layers.append(nn.Linear(d_emb_i , r_idx - l_idx ) )
        self.keep_order = keep_order

    def _compute_logit( self , hidden , weight , bias , proj ):
        '''simple docstring'''
        if proj is None:
            logit = nn.functional.linear(hidden , weight , bias=bias )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden , proj.t().contiguous() )
            logit = nn.functional.linear(proj_hid , weight , bias=bias )
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward( self , hidden , labels=None , keep_order=False ):
        '''simple docstring'''
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1 , hidden.size(-1 ) )
            labels = labels.view(-1 )
            if hidden.size(0 ) != labels.size(0 ):
                raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
        else:
            hidden = hidden.view(-1 , hidden.size(-1 ) )
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
                out[mask] = (
                    -nn.functional.log_softmax(logit , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
                )
            else:
                out = nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            if labels is None:
                out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            else:
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                l_idx , r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0 , indices_i ) - l_idx
                    head_logprob_i = head_logprob.index_select(0 , indices_i )
                    hidden_i = hidden.index_select(0 , indices_i )
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None] ).squeeze(1 )
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
                        out.index_copy_(0 , indices_i , -logprob_i )
                    else:
                        out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
                    offset += logprob_i.size(0 )
        return out

    def log_prob( self , hidden ):
        '''simple docstring'''
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                start_idx , stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
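A toy forward pass through the head above (sizes and cutoffs are illustrative; weights are untrained):

import torch

head = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=32, d_proj=32, cutoffs=[100, 500], div_val=1)
hidden = torch.randn(2, 6, 32)                  # (batch, seq_len, d_proj)
labels = torch.randint(0, 1000, (2, 6))
nll = head(hidden, labels)                      # per-token negative log-likelihoods
logprobs = head.log_prob(hidden.view(-1, 32))   # full (N, n_token) log-probabilities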
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = 'src/diffusers'

# Matches is_xxx_available()
_re_backend = re.compile(r'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')

DUMMY_CONSTANT = '\n{0} = None\n'

DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n'

DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'


def find_backend( line ):
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def _UpperCamelCase ( ):
with open(os.path.join(lowercase__ , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__SCREAMING_SNAKE_CASE : Dict = f.readlines()
# Get to the point we do the actual imports for type checking
__SCREAMING_SNAKE_CASE : List[str] = 0
__SCREAMING_SNAKE_CASE : Union[str, Any] = {}
# Go through the end of the file
while line_index < len(lowercase__ ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
__SCREAMING_SNAKE_CASE : str = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('''else:''' ):
line_index += 1
line_index += 1
__SCREAMING_SNAKE_CASE : str = []
# Until we unindent, add backend objects to the list
while line_index < len(lowercase__ ) and len(lines[line_index] ) > 1:
__SCREAMING_SNAKE_CASE : Tuple = lines[line_index]
__SCREAMING_SNAKE_CASE : Optional[Any] = _re_single_line_import.search(lowercase__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(lowercase__ ) > 0:
__SCREAMING_SNAKE_CASE : Optional[Any] = objects
else:
line_index += 1
return backend_specific_objects
def _UpperCamelCase ( lowercase__ , lowercase__ ):
if name.isupper():
return DUMMY_CONSTANT.format(lowercase__ )
elif name.islower():
return DUMMY_FUNCTION.format(lowercase__ , lowercase__ )
else:
return DUMMY_CLASS.format(lowercase__ , lowercase__ )
def _UpperCamelCase ( lowercase__=None ):
if backend_specific_objects is None:
__SCREAMING_SNAKE_CASE : List[str] = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
__SCREAMING_SNAKE_CASE : List[str] = {}
for backend, objects in backend_specific_objects.items():
__SCREAMING_SNAKE_CASE : int = '''[''' + ''', '''.join(F'''"{b}"''' for b in backend.split('''_and_''' ) ) + ''']'''
__SCREAMING_SNAKE_CASE : List[str] = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(lowercase__ , lowercase__ ) for o in objects] )
__SCREAMING_SNAKE_CASE : int = dummy_file
return dummy_files
def _UpperCamelCase ( lowercase__=False ):
__SCREAMING_SNAKE_CASE : List[str] = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
__SCREAMING_SNAKE_CASE : Optional[int] = {'''torch''': '''pt'''}
# Locate actual dummy modules and read their content.
__SCREAMING_SNAKE_CASE : Tuple = os.path.join(lowercase__ , '''utils''' )
__SCREAMING_SNAKE_CASE : str = {
backend: os.path.join(lowercase__ , F'''dummy_{short_names.get(lowercase__ , lowercase__ )}_objects.py''' )
for backend in dummy_files.keys()
}
__SCREAMING_SNAKE_CASE : Tuple = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(lowercase__ ):
with open(lowercase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__SCREAMING_SNAKE_CASE : Dict = f.read()
else:
__SCREAMING_SNAKE_CASE : int = ''''''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(lowercase__ , lowercase__ )}_objects.py as the main '''
'''__init__ has new objects.''' )
with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'''The main __init__ has objects that are not present in '''
F'''diffusers.utils.dummy_{short_names.get(lowercase__ , lowercase__ )}_objects.py. Run `make fix-copies` '''
'''to fix this.''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 9 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler( SchedulerMixin , ConfigMixin ):
    order = 1

    @register_to_config
    def __init__( self , num_train_timesteps=2_000 , beta_min=0.1 , beta_max=20 , sampling_eps=1E-3 ):
        """simple docstring"""
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps( self , num_inference_steps , device = None ):
        """simple docstring"""
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )

    def step_pred( self , score , x , t , generator=None ):
        """simple docstring"""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
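A minimal sketch of one reverse-diffusion step with this scheduler (the score would come from a trained model; here it is random for illustration):

import torch

scheduler = ScoreSdeVpScheduler()
scheduler.set_timesteps(10)
x = torch.randn(1, 3, 8, 8)
t = scheduler.timesteps[0] * torch.ones(x.shape[0])
score = torch.randn_like(x)
x, x_mean = scheduler.step_pred(score, x, t)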
| 148 | 0 |
def solution() -> str:
    total = 0
    for i in range(1 , 1001 ):
        total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
print(solution())
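    # Expected output, the last ten digits of sum(i**i for i in 1..1000): 9110846700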
| 176 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """efficientformer"""
def __init__( self , A = [3, 2, 6, 4] , A = [4_8, 9_6, 2_2_4, 4_4_8] , A = [True, True, True, True] , A = 4_4_8 , A = 3_2 , A = 4 , A = 7 , A = 5 , A = 8 , A = 4 , A = 0.0 , A = 1_6 , A = 3 , A = 3 , A = 3 , A = 2 , A = 1 , A = 0.0 , A = 1 , A = True , A = True , A = 1e-5 , A = "gelu" , A = 0.02 , A = 1e-1_2 , A = 2_2_4 , A = 1e-0_5 , **A , ) -> None:
super().__init__(**A )
snake_case : Dict = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : Any = hidden_sizes
snake_case : Optional[Any] = num_hidden_layers
snake_case : List[Any] = num_attention_heads
snake_case : List[Any] = initializer_range
snake_case : str = layer_norm_eps
snake_case : Dict = patch_size
snake_case : Optional[int] = num_channels
snake_case : int = depths
snake_case : Optional[int] = mlp_expansion_ratio
snake_case : Any = downsamples
snake_case : Dict = dim
snake_case : Optional[int] = key_dim
snake_case : Union[str, Any] = attention_ratio
snake_case : Any = resolution
snake_case : Dict = pool_size
snake_case : Any = downsample_patch_size
snake_case : Tuple = downsample_stride
snake_case : Any = downsample_pad
snake_case : Union[str, Any] = drop_path_rate
snake_case : List[str] = num_metaad_blocks
snake_case : Union[str, Any] = distillation
snake_case : List[str] = use_layer_scale
snake_case : int = layer_scale_init_value
snake_case : Union[str, Any] = image_size
snake_case : Dict = batch_norm_eps
| 176 | 1 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1_024,
}


def load_vocab( vocab_file ):
    '''Loads a vocabulary file into an ordered token -> index dictionary.'''
    vocab = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip('\n' )
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object ):
    def __init__( self , vocab , unk_token="<unk>" , max_input_chars_per_word=200 ):
        """simple docstring"""
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize( self , token ):
        """simple docstring"""
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            while start < end:
                substr = ''.join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
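# Greedy longest-match illustration for the tokenizer above: with a vocab such as
# {"a": 0, "ab": 1, "c": 2}, WordpieceTokenizer(vocab ).tokenize("abc" ) matches the
# longest in-vocab prefix "ab" first, then "c", yielding ["ab", "c"].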
class CpmAntTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__( self , vocab_file , bod_token="<d>" , eod_token="</d>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , line_token="</n>" , space_token="</_>" , padding_side="left" , **kwargs , ):
        """simple docstring"""
        requires_backends(self , ['jieba'] )
        super().__init__(
            bod_token=bod_token , eod_token=eod_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , unk_token=unk_token , line_token=line_token , space_token=space_token , padding_side=padding_side , **kwargs , )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file )
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
return self.encoder["\n"]
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCamelCase ( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = []
for x in jieba.cut(A_ , cut_all=A_ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(A_ ) )
return output_tokens
def __UpperCamelCase ( self , A_ , **A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = [i for i in token_ids if i >= 0]
UpperCamelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(A_ , **A_ )
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
return token in self.encoder
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
return "".join(A_ )
def __UpperCamelCase ( self , A_ ) -> Tuple:
"""simple docstring"""
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
return self.decoder.get(A_ , self.unk_token )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Restore the explicit space/newline tokens before writing the vocab out.
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
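# Minimal usage sketch (the vocab path below is hypothetical):
# tokenizer = CpmAntTokenizer("vocab.txt")
# tokenizer.tokenize("今天天气真好!")  # jieba segmentation, then WordPiece per segment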
| 222 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 222 | 1 |
from typing import Any


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        # Insert a new node at the head of the list.
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
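# Note: swap_nodes exchanges the two nodes' data payloads rather than relinking
# the nodes, so it needs no pointer surgery and costs two O(n) scans.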
| 354 |
def solution(numerator: int = 1, digit: int = 1_000) -> int:
    """Find the value d < `digit` for which 1/d has the longest recurring decimal cycle."""
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
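# Example (well-known result): solution(1, 10) returns 7, since 1/7 = 0.(142857)
# has the longest recurring decimal cycle among denominators below 10.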
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 148 | 0 |
"""simple docstring"""
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
__A = current_set.copy()
for row_index, row in enumerate(__UpperCamelCase ):
__A = row[0]
for column_index, column in enumerate(__UpperCamelCase ):
if magnitude == 0:
__A = column
continue
__A = column / magnitude
# Subtract to cancel term
__A = current_set[0]
__A = [first_row]
__A = current_set[1::]
for row in current_set:
__A = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(__UpperCamelCase )
continue
for column_index in range(len(__UpperCamelCase ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(__UpperCamelCase )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
__A = final_set[0]
__A = []
__A = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
__A = simplify(__UpperCamelCase )
for i in range(len(__UpperCamelCase ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , __UpperCamelCase )
__A = resultant
return final_set
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
if len(__UpperCamelCase ) == 0:
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
__A = len(__UpperCamelCase ) + 1
if any(len(__UpperCamelCase ) != _length for item in equations ):
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
for row in equations:
if any(not isinstance(__UpperCamelCase , (int, float) ) for column in row ):
raise ValueError('''solve_simultaneous() requires lists of integers''' )
if len(__UpperCamelCase ) == 1:
return [equations[0][-1] / equations[0][0]]
__A = equations.copy()
if any(0 in row for row in data_set ):
__A = data_set.copy()
__A = []
for row_index, row in enumerate(__UpperCamelCase ):
if 0 not in row:
__A = data_set.pop(__UpperCamelCase )
break
if not full_row:
raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
data_set.insert(0 , __UpperCamelCase )
__A = data_set.copy()
__A = simplify(__UpperCamelCase )
__A = simplified[::-1]
__A = []
for row in simplified:
__A = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
__A = row.copy()[: len(__UpperCamelCase ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(__UpperCamelCase ) == 0:
solutions.append(0 )
continue
__A = temp_row[1::]
__A = temp_row[::-1]
for column_index, column in enumerate(__UpperCamelCase ):
current_solution -= column * solutions[column_index]
solutions.append(__UpperCamelCase )
__A = []
for item in solutions:
final.append(float(round(__UpperCamelCase , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase_ = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 266 |
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowercase_ = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
lowercase_ = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
lowercase_ = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), id='''references''' ),
} ), )
    def _compute(
        self,
        references: List[List[List[str]]],
        predictions: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 266 | 1 |
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)."
        )
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
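# Worked example: bwt_transform("banana") gives
# {"bwt_string": "nnbaaa", "idx_original_string": 3}, and reverse_bwt("nnbaaa", 3)
# rebuilds "banana" by repeatedly prepending the BWT column and re-sorting.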
if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
| 42 |
import os


def solution(filename: str = "input.txt") -> int:
    """Return the minimal path sum in the matrix file, moving only up, down, and right."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
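# The column-by-column DP above first assumes a pure rightward step into column j,
# then relaxes each cell with downward moves (top-to-bottom pass) and upward moves
# (bottom-to-top pass), covering the allowed up/down/right movements.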
if __name__ == "__main__":
print(f"""{solution() = }""")
| 42 | 1 |
from functools import reduce

N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
)


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the 1000-digit number n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
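# For the 1000-digit constant N above, the published Project Euler 8 answer is
# solution() == 23514624000.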
if __name__ == "__main__":
    print(f"{solution() = }")
| 152 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__A =pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict(
            {"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]}
        )
        return dset
    def test_add_faiss_index(self):
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
lowerCamelCase_ = dset.map(
lambda lowercase , lowercase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowercase , keep_in_memory=lowercase )
lowerCamelCase_ = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
    def test_add_faiss_index_from_external_arrays(self):
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
    def test_serialization(self):
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
    def test_drop_index(self):
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(lowercase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertRaises(lowercase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCamelCase_ = np.eye(5 , dtype=np.floataa )[::-1]
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
self.assertRaises(lowercase , index.search_batch , queries[0] )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , lowercase )
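        # The batched queries are the identity rows in reverse order, so with an
        # inner-product index built over np.eye(5) the best match for query k is
        # vector 4 - k, which is why the indices come back as [4, 3, 2, 1, 0].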
    def test_factory(self):
import faiss
lowerCamelCase_ = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCamelCase_ = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(lowercase ):
lowerCamelCase_ = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
    def test_custom(self):
import faiss
lowerCamelCase_ = faiss.IndexFlat(5 )
lowerCamelCase_ = FaissIndex(custom_index=lowercase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
    def test_serialization(self):
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
index.save(tmp_file.name )
lowerCamelCase_ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index2 = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index2.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = Elasticsearch()
lowerCamelCase_ = {"acknowledged": True}
lowerCamelCase_ = ElasticSearchIndex(es_client=lowercase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
# batched queries with timeout
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase , request_timeout=30 )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
| 19 | 0 |
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using Knuth-Morris-Pratt."""
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """For each prefix, record the length of the longest proper prefix that is also a suffix."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
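# Because the failure array lets the scan resume after a mismatch without
# re-reading already-matched text, the search runs in O(len(text) + len(pattern)).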
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 364 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f"; torch/{_torch_version}"
if is_flax_available():
ua += f"; jax/{_jax_version}"
ua += f"; flax/{_flax_version}"
if is_onnx_available():
ua += f"; onnxruntime/{_onnxruntime_version}"
# CI will set this value to True
if os.environ.get("""DIFFUSERS_IS_CI""" , """""" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None) -> str:
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple:
if not is_jinja_available():
raise ValueError(
"""Modelcard rendering is based on Jinja templates."""
""" Please make sure to have `jinja` installed before using `create_model_card`."""
""" To install it, please run `pip install Jinja2`.""" )
if hasattr(SCREAMING_SNAKE_CASE__ , """local_rank""" ) and args.local_rank not in [-1, 0]:
return
lowercase : str = args.hub_token if hasattr(SCREAMING_SNAKE_CASE__ , """hub_token""" ) else None
lowercase : int = get_full_repo_name(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language="""en""" , license="""apache-2.0""" , library_name="""diffusers""" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=SCREAMING_SNAKE_CASE__ , model_name=SCREAMING_SNAKE_CASE__ , repo_name=SCREAMING_SNAKE_CASE__ , dataset_name=args.dataset_name if hasattr(SCREAMING_SNAKE_CASE__ , """dataset_name""" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(SCREAMING_SNAKE_CASE__ , """gradient_accumulation_steps""" ) else None
) , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , """adam_beta1""" ) else None , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , """adam_beta2""" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(SCREAMING_SNAKE_CASE__ , """adam_weight_decay""" ) else None , adam_epsilon=args.adam_epsilon if hasattr(SCREAMING_SNAKE_CASE__ , """adam_epsilon""" ) else None , lr_scheduler=args.lr_scheduler if hasattr(SCREAMING_SNAKE_CASE__ , """lr_scheduler""" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(SCREAMING_SNAKE_CASE__ , """lr_warmup_steps""" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(SCREAMING_SNAKE_CASE__ , """ema_inv_gamma""" ) else None , ema_power=args.ema_power if hasattr(SCREAMING_SNAKE_CASE__ , """ema_power""" ) else None , ema_max_decay=args.ema_max_decay if hasattr(SCREAMING_SNAKE_CASE__ , """ema_max_decay""" ) else None , mixed_precision=args.mixed_precision , )
lowercase : str = os.path.join(args.output_dir , """README.md""" )
model_card.save(SCREAMING_SNAKE_CASE__ )
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None) -> Optional[str]:
    """Extract the commit hash from a resolved filename pointing into the cache."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
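# huggingface_hub lays out downloads as .../snapshots/<commit_hash>/..., so a
# regex capture plus the REGEX_COMMIT_HASH sanity check recovers the commit hash
# from a resolved file path.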
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    # Move every blob file, then leave a symlink behind so the old cache keeps working.
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
F'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
"""the directory exists and can be written to."""
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(SCREAMING_SNAKE_CASE__ ).base_version ) >= version.parse("""0.20.0""" )
):
try:
lowercase : Any = hf_hub_download(
SCREAMING_SNAKE_CASE__ , filename=_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , )
warnings.warn(
f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead." , SCREAMING_SNAKE_CASE__ , )
return model_file
except: # noqa: E722
warnings.warn(
f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}' so that the correct variant file can be added." , SCREAMING_SNAKE_CASE__ , )
try:
# 2. Load model file as usual
lowercase : int = hf_hub_download(
SCREAMING_SNAKE_CASE__ , filename=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
"""listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """
"""token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """
"""login`.""" )
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
"""this model name. Check the model page at """
f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
f" directory containing a file named {weights_name} or"
""" \nCheckout your internet connection or see how to run the library in"""
""" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" )
except EnvironmentError:
raise EnvironmentError(
f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
"""'https://huggingface.co/models', make sure you don't have a local directory with the same name. """
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
f"containing a file named {weights_name}" )
| 285 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = '''▁'''
__lowerCAmelCase = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__lowerCAmelCase = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
__lowerCAmelCase = {
'''facebook/m2m100_418M''': 1_024,
}
# fmt: off
__lowerCAmelCase = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code], suffix=[eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting: prefix=[tgt_lang_code], suffix=[eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
| 89 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class TvltProcessor( ProcessorMixin ):
    attributes = ['''image_processor''', '''feature_extractor''']
    image_processor_class = '''TvltImageProcessor'''
    feature_extractor_class = '''TvltFeatureExtractor'''
    def __init__( self : Optional[int] , image_processor : Optional[int] , feature_extractor : Optional[Any] ) -> Dict:
        '''simple docstring'''
        super().__init__(image_processor=image_processor , feature_extractor=feature_extractor )
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__( self : List[Any] , images : List[str]=None , audio : Dict=None , images_mixed : List[str]=None , sampling_rate : List[str]=None , mask_audio : Dict=False , mask_pixel : Union[str, Any]=False , *args : List[str] , **kwargs : List[Any] , ) -> List[str]:
        '''simple docstring'''
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process." )
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images , mask_pixel=mask_pixel , *args , **kwargs )
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed , is_mixed=True , *args , **kwargs )
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio , *args , sampling_rate=sampling_rate , mask_audio=mask_audio , **kwargs )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict )
        if images is not None:
            output_dict.update(images_dict )
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict )
        return output_dict
@property
    def model_input_names( self : Tuple ) -> Tuple:
        '''simple docstring'''
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
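# Usage sketch (illustrative; `video_frames` and `waveform` are hypothetical
# inputs, and the sub-processor instances must be constructed separately):
#     processor = TvltProcessor(image_processor, feature_extractor)
#     batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)
#     print(processor.model_input_names)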
| 247 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob( text: str ) -> None:
    '''simple docstring'''
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(" " + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(F'''{round(-1 * my_fir_sum ):.1f}''' )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(F'''{round(-1 * my_sec_sum ):.1f}''' )
    # print the difference between them
    print(F'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def analyze_text( text: str ) -> tuple[Counter, Counter]:
    '''simple docstring'''
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
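# Quick sanity check (illustrative): analyze_text("ab") returns
# ({'b': 1, 'a': 1}, {' a': 1, 'ab': 1}), so the single-character entropy is
# -2 * (1/2) * log2(1/2) = 1.0 bit.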
def main( ) -> None:
    '''simple docstring'''
    import doctest
    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 350 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class LongformerConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """longformer"""
    def __init__( self : List[Any] , attention_window : Union[List[int], int] = 512 , sep_token_id : int = 2 , pad_token_id : int = 1 , bos_token_id : int = 0 , eos_token_id : int = 2 , vocab_size : int = 30522 , hidden_size : int = 768 , num_hidden_layers : int = 12 , num_attention_heads : int = 12 , intermediate_size : int = 3072 , hidden_act : str = "gelu" , hidden_dropout_prob : float = 0.1 , attention_probs_dropout_prob : float = 0.1 , max_position_embeddings : int = 512 , type_vocab_size : int = 2 , initializer_range : float = 0.02 , layer_norm_eps : float = 1e-12 , onnx_export : bool = False , **kwargs : int , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig( OnnxConfig ):
    """simple docstring"""
    def __init__( self : Tuple , config : "PretrainedConfig" , task : str = "default" , patching_specs : "List[PatchingSpec]" = None):
        super().__init__(config , task , patching_specs)
        config.onnx_export = True
    @property
    def inputs( self : int):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ])
    @property
    def outputs( self : Union[str, Any]):
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs
    @property
    def atol_for_validation( self : Optional[int]):
        return 1e-4
    @property
    def default_onnx_opset( self : str):
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset , 14)
    def generate_dummy_inputs( self : List[str] , preprocessor : "PreTrainedTokenizerBase" , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework)
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
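# Usage sketch (illustrative; assumes a compatible tokenizer instance):
#     config = LongformerConfig(attention_window=256, onnx_export=True)
#     onnx_config = LongformerOnnxConfig(config, task="default")
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=16, framework=TensorType.PYTORCH)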
| 226 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def create_rename_keys( config: List[Any] , base_model: Tuple=False ) -> List[Any]:
    rename_keys : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v( state_dict: Optional[Any] , config: Union[str, Any] , base_model: Dict=False ) -> Union[str, Any]:
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict: int ) -> List[str]:
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct: List[str] , old: Union[str, Any] , new: int ) -> List[Any]:
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ) -> List[Any]:
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name: Any , pytorch_dump_folder_path: Optional[int] ) -> Optional[Any]:
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10] )
        config.image_size = int(vit_name[-9:-6] )
    else:
        config.num_labels = 1000
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-1k-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4] )
        config.image_size = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('''tiny''' ):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith('''small''' ):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('''small''' ):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_attention_heads = 8
            config.num_hidden_layers = 8
        elif vit_name[4:].startswith('''base''' ):
            pass
        elif vit_name[4:].startswith('''large''' ):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith('''huge''' ):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size )
    else:
        image_processor = ViTImageProcessor(size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1e-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
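# Example invocation (illustrative; the script file name is hypothetical):
#     python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 --pytorch_dump_folder_path ./vit-base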
| 201 |
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
black_avoid_patterns = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 55 | 0 |
"""simple docstring"""
import os
def solution( ):
    """simple docstring"""
    with open(os.path.dirname(__file__ ) + """/p022_names.txt""" ) as file:
        names = str(file.readlines()[0] )
        names = names.replace("""\"""" , """""" ).split(""",""" )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
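# Worked example from the Project Euler 22 statement: "COLIN" scores
# 3 + 15 + 12 + 9 + 14 = 53 and sits at position 938 in the sorted list,
# contributing 938 * 53 = 49714 to the total.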
if __name__ == "__main__":
print(solution())
| 234 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
processor = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def init_clap( checkpoint_path , enable_fusion=False ):
    """simple docstring"""
    model , model_cfg = create_model(
        """HTSAT-tiny""" , """roberta""" , checkpoint_path , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=enable_fusion , fusion_type="""aff_2d""" if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict( state_dict ):
    """simple docstring"""
    model_state_dict = {}
    sequential_layers_pattern = R""".*sequential.(\d+).*"""
    text_projection_pattern = R""".*_projection.(\d+).*"""
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(sequential_layer )//3}.linear.''' )
        elif re.match(text_projection_pattern , key ):
            projecton_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("""qkv""" , """query""" )] = query_layer
            model_state_dict[key.replace("""qkv""" , """key""" )] = key_layer
            model_state_dict[key.replace("""qkv""" , """value""" )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
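# Illustrative renaming performed above (hypothetical key):
#     "text_branch.sequential.3.weight" -> "text_model.layers.1.linear.weight"
# ("text_branch" is mapped via KEYS_TO_MODIFY_MAPPING; 3 // 3 == 1 picks the layer index.)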
def convert_clap_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    """simple docstring"""
    clap_model , clap_model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 234 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config( model_name: Optional[int] ) -> Dict:
    '''simple docstring'''
    config = SwinConfig(image_size=192 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('''Model not supported, only supports base and large variants''' )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key( name: Optional[Any] ) -> int:
    '''simple docstring'''
    if "encoder.mask_token" in name:
        name = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
    if "encoder.patch_embed.proj" in name:
        name = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "encoder.patch_embed.norm" in name:
        name = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"
    if "decoder" in name:
        pass
    else:
        name = "swin." + name
    return name
def convert_state_dict( orig_state_dict: Any , model: List[Any] ) -> Optional[Any]:
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[2] )
            block_num = int(key_split[4] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[
                    :dim
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_swin_checkpoint( model_name: Dict , checkpoint_path: Dict , pytorch_dump_folder_path: Optional[int] , push_to_hub: int ) -> List[Any]:
    '''simple docstring'''
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )["model"]
    config = get_swin_config(model_name )
    model = SwinForMaskedImageModeling(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='''pt''' )
    with torch.no_grad():
        outputs = model(**inputs )
    print(outputs.keys() )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F"""Pushing model and image processor for {model_name} to hub""" )
        model.push_to_hub(F"""microsoft/{model_name}""" )
        image_processor.push_to_hub(F"""microsoft/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
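# Example invocation (illustrative; the script file name is hypothetical):
#     python convert_swin_simmim_to_pytorch.py --model_name swin-base-simmim-window6-192 \
#         --checkpoint_path ./simmim_pretrain__swin_base__img192_window6__100ep.pth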
| 117 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property( property):
    def __get__( self, obj, objtype=None):
        '''simple docstring'''
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool ( val ):
    '''simple docstring'''
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F"invalid truth value {val!r}" )
def is_tensor ( _lowerCamelCase ):
'''simple docstring'''
if is_torch_fx_proxy(_lowerCamelCase ):
return True
if is_torch_available():
import torch
if isinstance(_lowerCamelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(_lowerCamelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(_lowerCamelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(_lowerCamelCase , np.ndarray )
def _is_numpy ( _lowerCamelCase ):
    '''simple docstring'''
    return isinstance(_lowerCamelCase , np.ndarray )
def is_numpy_array ( _lowerCamelCase ):
    '''simple docstring'''
    return _is_numpy(_lowerCamelCase )
def _is_torch ( _lowerCamelCase ):
    '''simple docstring'''
    import torch
    return isinstance(_lowerCamelCase , torch.Tensor )
def is_torch_tensor ( _lowerCamelCase ):
    '''simple docstring'''
    return False if not is_torch_available() else _is_torch(_lowerCamelCase )
def _is_torch_device ( _lowerCamelCase ):
    '''simple docstring'''
    import torch
    return isinstance(_lowerCamelCase , torch.device )
def is_torch_device ( _lowerCamelCase ):
    '''simple docstring'''
    return False if not is_torch_available() else _is_torch_device(_lowerCamelCase )
def _is_torch_dtype ( _lowerCamelCase ):
    '''simple docstring'''
    import torch
    if isinstance(_lowerCamelCase , str ):
        if hasattr(torch , _lowerCamelCase ):
            _lowerCamelCase = getattr(torch , _lowerCamelCase )
        else:
            return False
    return isinstance(_lowerCamelCase , torch.dtype )
def is_torch_dtype ( _lowerCamelCase ):
    '''simple docstring'''
    return False if not is_torch_available() else _is_torch_dtype(_lowerCamelCase )
def _is_tensorflow ( _lowerCamelCase ):
    '''simple docstring'''
    import tensorflow as tf
    return isinstance(_lowerCamelCase , tf.Tensor )
def is_tf_tensor ( _lowerCamelCase ):
    '''simple docstring'''
    return False if not is_tf_available() else _is_tensorflow(_lowerCamelCase )
def _is_tf_symbolic_tensor ( _lowerCamelCase ):
    '''simple docstring'''
    import tensorflow as tf
    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf , "is_symbolic_tensor" ):
        return tf.is_symbolic_tensor(_lowerCamelCase )
    return type(_lowerCamelCase ) == tf.Tensor
def is_tf_symbolic_tensor ( _lowerCamelCase ):
    '''simple docstring'''
    return False if not is_tf_available() else _is_tf_symbolic_tensor(_lowerCamelCase )
def _is_jax ( _lowerCamelCase ):
    '''simple docstring'''
    import jax.numpy as jnp # noqa: F811
    return isinstance(_lowerCamelCase , jnp.ndarray )
def is_jax_tensor ( _lowerCamelCase ):
    '''simple docstring'''
    return False if not is_flax_available() else _is_jax(_lowerCamelCase )
def to_py_obj ( obj ):
    '''simple docstring'''
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_py_obj(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return [to_py_obj(o ) for o in obj]
    elif is_tf_tensor(obj ):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj ):
        return np.asarray(obj ).tolist()
    elif isinstance(obj , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def to_numpy ( obj ):
    '''simple docstring'''
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_numpy(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return np.array(obj )
    elif is_tf_tensor(obj ):
        return obj.numpy()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj ):
        return np.asarray(obj )
    else:
        return obj
class ModelOutput( OrderedDict):
    def __post_init__( self):
        '''simple docstring'''
        class_fields = fields(self)
        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")
        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])
        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value).")
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v
    def __delitem__( self, *args, **kwargs):
        '''simple docstring'''
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
    def setdefault( self, *args, **kwargs):
        '''simple docstring'''
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
    def pop( self, *args, **kwargs):
        '''simple docstring'''
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
    def update( self, *args, **kwargs):
        '''simple docstring'''
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
    def __getitem__( self, k):
        '''simple docstring'''
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self, name, value):
        '''simple docstring'''
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)
    def __setitem__( self, key, value):
        '''simple docstring'''
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)
    def to_tuple( self):
        '''simple docstring'''
        return tuple(self[k] for k in self.keys())
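# Usage sketch (illustrative; needs `from dataclasses import dataclass` and
# `from typing import Optional`):
#     @dataclass
#     class SampleOutput(ModelOutput):
#         loss: Optional[float] = None
#         logits: Optional[list] = None
#     out = SampleOutput(logits=[1, 2])
#     assert out["logits"] == out.logits and out.to_tuple() == ([1, 2],)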
class ExplicitEnum( str , Enum):
    @classmethod
    def _missing_( cls, value):
        '''simple docstring'''
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}")
class PaddingStrategy( ExplicitEnum):
    LONGEST = 'longest'
    MAX_LENGTH = 'max_length'
    DO_NOT_PAD = 'do_not_pad'
class TensorType( ExplicitEnum):
    PYTORCH = 'pt'
    TENSORFLOW = 'tf'
    NUMPY = 'np'
    JAX = 'jax'
class ContextManagers :
    def __init__( self, context_managers):
        '''simple docstring'''
        self.context_managers = context_managers
        self.stack = ExitStack()
    def __enter__( self):
        '''simple docstring'''
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)
    def __exit__( self, *args, **kwargs):
        '''simple docstring'''
        self.stack.__exit__(*args, **kwargs)
def can_return_loss ( model_class ):
    '''simple docstring'''
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call ) # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward ) # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ ) # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels ( model_class ):
    '''simple docstring'''
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call ) # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward ) # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ ) # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict ( d , parent_key = "" , delimiter = "." ):
    '''simple docstring'''
    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                yield from flatten_dict(v , key , delimiter=delimiter ).items()
            else:
                yield key, v
    return dict(_flatten_dict(d , parent_key , delimiter ) )
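# Quick example (illustrative):
#     flatten_dict({"a": {"b": 1, "c": 2}})  ->  {"a.b": 1, "a.c": 2}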
@contextmanager
def working_or_temp_dir ( working_dir , use_temp_dir = False ):
'''simple docstring'''
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def transpose ( array , axes=None ):
    '''simple docstring'''
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(F"Type not supported for transpose: {type(array )}." )
def reshape ( array , newshape ):
    '''simple docstring'''
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(F"Type not supported for reshape: {type(array )}." )
def squeeze ( array , axis=None ):
    '''simple docstring'''
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(F"Type not supported for squeeze: {type(array )}." )
def expand_dims ( array , axis ):
    '''simple docstring'''
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(F"Type not supported for expand_dims: {type(array )}." )
def tensor_size ( array ):
    '''simple docstring'''
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(F"Type not supported for tensor_size: {type(array )}." )
def add_model_info_to_auto_map ( auto_map , repo_id ):
    '''simple docstring'''
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [F"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = F"{repo_id}--{value}"
    return auto_map
def infer_framework ( model_class ):
    '''simple docstring'''
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch" ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(F"Could not infer framework from class {model_class}." )
| 36 | 0 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
_snake_case = logging.get_logger(__name__)
class MarkupLMFeatureExtractor( FeatureExtractionMixin ):
    def __init__( self :Optional[Any] , **_lowercase :Optional[Any] ):
        '''simple docstring'''
        requires_backends(self , ["bs4"] )
        super().__init__(**_lowercase )
    def xpath_soup( self :Optional[int] , element :str ):
        '''simple docstring'''
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents: # type: bs4.element.Tag
            siblings = parent.find_all(child.name , recursive=False )
            xpath_tags.append(child.name )
            xpath_subscripts.append(
                0 if 1 == len(siblings ) else next(i for i, s in enumerate(siblings , 1 ) if s is child ) )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single( self :Any , html_string :Optional[Any] ):
        '''simple docstring'''
        html_code = BeautifulSoup(html_string , "html.parser" )
        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []
        for element in html_code.descendants:
            if type(element ) == bs4.element.NavigableString:
                if type(element.parent ) != bs4.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element ).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag )
                xpath_tags , xpath_subscripts = self.xpath_soup(element )
                string2xtag_seq.append(xpath_tags )
                string2xsubs_seq.append(xpath_subscripts )
        if len(all_doc_strings ) != len(string2xtag_seq ):
            raise ValueError("Number of doc strings and xtags does not correspond" )
        if len(all_doc_strings ) != len(string2xsubs_seq ):
            raise ValueError("Number of doc strings and xsubs does not correspond" )
        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath( self :Optional[Any] , xpath_tags :Dict , xpath_subscripts :Optional[int] ):
        '''simple docstring'''
        xpath = ''
        for tagname, subs in zip(xpath_tags , xpath_subscripts ):
            xpath += f'''/{tagname}'''
            if subs != 0:
                xpath += f'''[{subs}]'''
        return xpath
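    # Example (illustrative): xpath_tags ['html', 'body', 'div'] with
    # xpath_subscripts [0, 0, 2] produce the xpath '/html/body/div[2]'.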
    def __call__( self :str , html_strings :Union[str, Any] ):
        '''simple docstring'''
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings , str ):
            valid_strings = True
        elif isinstance(html_strings , (list, tuple) ):
            if len(html_strings ) == 0 or isinstance(html_strings[0] , str ):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                "HTML strings must be of type `str`, `List[str]` (batch of examples), "
                f'''but is of type {type(html_strings )}.''' )
        is_batched = bool(isinstance(html_strings , (list, tuple) ) and (isinstance(html_strings[0] , str )) )
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings , string2xtag_seq , string2xsubs_seq = self.get_three_from_single(html_string )
            nodes.append(all_doc_strings )
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings , string2xtag_seq , string2xsubs_seq ):
                xpath_string = self.construct_xpath(tag_list , sub_list )
                xpath_strings.append(xpath_string )
            xpaths.append(xpath_strings )
        # return as Dict
        data = {'nodes': nodes, 'xpaths': xpaths}
        encoded_inputs = BatchFeature(data=data , tensor_type=None )
        return encoded_inputs
| 357 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape( tensor_list ):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:] )
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        'height',
        'width',
        'cross_attention_kwargs',
        'negative_prompt_embeds',
        'prompt_embeds',
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    test_cpu_offload = True
    @property
    def dummy_image( self :Optional[Any] ):
        '''simple docstring'''
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
    def get_dummy_components( self :Dict ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            act_fn="gelu" , attention_head_dim=8 , norm_num_groups=None , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ) , in_channels=8 , mid_block_type=None , only_cross_attention=False , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        scheduler = EulerDiscreteScheduler(prediction_type="sample" )
        text_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="quick_gelu" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs( self :Dict , device :Union[str, Any] , seed :int=0 ):
        '''simple docstring'''
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference( self :List[Any] ):
        '''simple docstring'''
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 256, 256, 3) )
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
    def test_attention_slicing_forward_pass( self :Any ):
        '''simple docstring'''
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
    def test_cpu_offload_forward_pass( self :List[Any] ):
        '''simple docstring'''
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
    def test_dict_tuple_outputs_equivalent( self :int ):
        '''simple docstring'''
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def test_inference_batch_single_identical( self :List[Any] ):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
    def test_pt_np_pil_outputs_equivalent( self :List[Any] ):
        '''simple docstring'''
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
    def test_save_load_local( self :Optional[int] ):
        '''simple docstring'''
        super().test_save_load_local(expected_max_difference=3e-3 )
    def test_save_load_optional_components( self :Tuple ):
        '''simple docstring'''
        super().test_save_load_optional_components(expected_max_difference=3e-3 )
    def test_karras_schedulers_shape( self :Dict ):
        '''simple docstring'''
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            scheduler_cls = getattr(diffusers , scheduler_enum.name )
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config )
            output = pipe(**inputs )[0]
            outputs.append(output )
        assert check_same_shape(outputs )
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self :Optional[Any] ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_fp16( self :str ):
        '''simple docstring'''
        generator = torch.manual_seed(33 )
        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.float16 )
        pipe.to("cuda" )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.float16 )
        upscaler.to("cuda" )
        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
        low_res_latents = pipe(prompt , generator=generator , output_type="latent" ).images
        image = upscaler(
            prompt=prompt , image=low_res_latents , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type="np" , ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
        assert np.abs((expected_image - image).mean() ) < 5e-2
    def test_latent_upscaler_fp16_image_upscaling( self :Optional[Any] ):
        '''simple docstring'''
        generator = torch.manual_seed(33 )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.float16 )
        upscaler.to("cuda" )
        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
        upscaled_image = upscaler(
            prompt=prompt , image=image , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type="np" , ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
        assert np.abs((expected_image - upscaled_image).max() ) < 5e-2
| 201 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '''/user'''
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('USER_TOKEN', '')
def fetch_github_info( auth_token ) -> Any:
    headers = {
        'Authorization': F'token {auth_token}',
        'Accept': 'application/vnd.github.v3+json',
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT ,headers=headers ).json()
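# Usage sketch (the token below is a hypothetical placeholder, never a real secret):
#     user_info = fetch_github_info("ghp_xxxxxxxxxxxxxxxx")
#     print(user_info.get("login"))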
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError('\'USER_TOKEN\' field cannot be empty.')
| 208 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : List[str] =logging.get_logger(__name__)
def make_batched( videos ):
    """simple docstring"""
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}" )
class VivitImageProcessor( BaseImageProcessor ):
    model_input_names = ['''pixel_values''']
    def __init__( self : Tuple , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , offset : bool = True , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs : str , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"""shortest_edge""": 256}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self : int , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Optional[Any] , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size["""shortest_edge"""] , default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size["""height"""], size["""width"""])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self : Union[str, Any] , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : List[Any] , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
    def rescale(
        self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        # Rescale by `scale`; with `offset=True`, additionally shift down by 1 so that
        # `scale=1/127.5` maps pixel values from [0, 255] to [-1, 1].
        image = image.astype(np.float32)
        rescaled_image = rescale(image, scale=scale, data_format=data_format, **kwargs)
        if offset:
            rescaled_image = rescaled_image - 1
        return rescaled_image
    def normalize(
        self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
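# A minimal usage sketch. The class name above was restored for readability, so
# treat it as illustrative; the upstream processor carries a model-specific name:
#
#   import numpy as np
#
#   processor = VideoImageProcessor()
#   video = [np.random.randint(0, 256, (360, 480, 3), dtype=np.uint8) for _ in range(8)]
#   batch = processor.preprocess(video, return_tensors="np")
#   batch["pixel_values"].shape  # (1, 8, 3, 224, 224): (batch, frames, channels, height, width)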
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
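# `_import_structure` maps each submodule to its public symbols; `_LazyModule`
# (constructed at the bottom of the file) uses it to defer the heavy imports
# until an attribute is first accessed.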
if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
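# With the lazy module installed in sys.modules, a plain attribute import such as
# (assuming torch is available):
#
#   from transformers.models.xglm import XGLMForCausalLM
#
# only loads modeling_xglm at that point; static type checkers instead follow the
# eager imports under TYPE_CHECKING above.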
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
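# Conceptually, LocalSGD lets each worker take K optimizer steps on its own copy
# of the parameters, then averages the copies across workers:
#
#   w_i <- w_i - lr * g_i     (repeated K times, no cross-worker communication)
#   w   <- mean_i(w_i)        (one synchronization every K steps)
#
# which reduces communication roughly K-fold versus synchronizing every step.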
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
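# Example invocations (the script name is illustrative; adjust to where this file lives):
#
#   python local_sgd.py --local_sgd_steps 8
#   accelerate launch local_sgd.py --mixed_precision fp16 --gradient_accumulation_steps 2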