"""simple docstring"""
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
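# Usage sketch (added for illustration, not part of the shim): importing from
# the old module path still works but emits a FutureWarning pointing at the
# new import location. Note the warning fires once, on first import.
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import (
#             StableDiffusionControlNetPipeline,
#         )
#     assert any(issubclass(w.category, FutureWarning) for w in caught)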
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_params = frozenset([])
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        # prompt kept verbatim ("turle"): the reference .npy was generated with this exact string
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        # prompt kept verbatim ("turle"): the reference .npy was generated with this exact string
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(input_image, "anime turtle", num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
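# Minimal usage sketch (added for illustration, not part of the original
# module; `toy_infer` is a made-up stand-in for a pipeline `forward`): the
# DataLoader yields batches of 2, `toy_infer` returns a dict of batched
# tensors, and the iterator unrolls them back into batch_size=1 items.
#
#     loader = torch.utils.data.DataLoader(list(range(4)), batch_size=2)
#     def toy_infer(batch, **params):
#         return {"logits": batch.float().unsqueeze(1).repeat(1, 3)}
#     for item in PipelineIterator(loader, toy_infer, {}, loader_batch_size=2):
#         print(item["logits"].shape)  # torch.Size([1, 3]), printed four times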
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Subiterator None means we haven't started a `preprocess` iterator, so start it
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
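# Illustration (added, not in the original file): the chunk iterator is a
# generator-level flatten, akin to itertools.chain.from_iterable.
#
#     def chunks(item, **params):
#         yield from range(item)
#     # PipelineChunkIterator([2, 3], chunks, {}) yields 0, 1, 0, 1, 2:
#     # each item's sub-generator is exhausted before the next item starts.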
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
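# Illustration (added, not in the original file): if the flattened stream of
# processed items looks like
#     {"data": 0, "is_last": False}, {"data": 1, "is_last": True},
#     {"data": 2, "is_last": True}, ...
# the iterator yields [{"data": 0}, {"data": 1}] and then [{"data": 2}]:
# one list per original `process` boundary, with `is_last` popped off.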
class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : int = 4000000 ):
"""simple docstring"""
lowerCamelCase__ : Dict =[]
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(__lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =b, a + b
return sum(__lowerCamelCase )
if __name__ == "__main__":
print(f'{solution() = }')
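# Sanity check (added for illustration, not in the original file): the
# even-valued Fibonacci terms are 2, 8, 34, ... and for the default limit of
# 4,000,000 their sum is 4613732, the answer to Project Euler problem 2.
#
#     >>> solution(10)
#     10
#     >>> solution()
#     4613732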
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
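# Example of the mapping above (added for illustration): a fairseq key such as
# "encoder.layers.11.self_attn.k_proj.weight" matches the "self_attn.k_proj"
# entry, the layer index "11" is substituted for the "*", and the tensor is
# assigned to "encoder.layers.11.attention.k_proj" with weight_type "weight".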
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.feat_proj_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """simple docstring"""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowercase : int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_lowercase : Any = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
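# Example invocation (added for illustration; the script name and paths are
# placeholders, not from the original file):
#
#   python convert_sew_checkpoint.py \
#       --checkpoint_path ./sew_finetuned.pt \
#       --pytorch_dump_folder_path ./sew-hf \
#       --dict_path ./dict.ltr.txt \
#       --is_finetuned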
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,
):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
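# Note (added): `attention_mask` is 1 wherever `input_ids` differs from the
# pad token, while the decoder mask additionally forces position 0 to 1 so
# that the decoder start token is attended to even when it equals the pad id.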
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_short_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs,
        )

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005, "
                "so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text
    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
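# Usage sketch (added for illustration; the checkpoint, `generated_ids`, and
# the patterns below are examples, not from the original file): truncate a
# sampled completion at the next top-level comment, function definition, or
# print statement.
#
#     tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#     text = tokenizer.decode(generated_ids, truncate_before_pattern=[r"^#", r"^def ", r"^print"])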
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : list[int] ):
"""simple docstring"""
# 1. Validate that path exists between current and next vertices
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[int] , __lowerCamelCase : int ):
"""simple docstring"""
# Base Case
if curr_ind == len(__lowerCamelCase ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(__lowerCamelCase ) ):
if valid_connection(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
# Insert current vertex into path as next transition
lowerCamelCase__ : Tuple =next_ver
# Validate created path
if util_hamilton_cycle(__lowerCamelCase , __lowerCamelCase , curr_ind + 1 ):
return True
# Backtrack
lowerCamelCase__ : int =-1
return False
def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : int = 0 ):
"""simple docstring"""
lowerCamelCase__ : Tuple =[-1] * (len(__lowerCamelCase ) + 1)
# initialize start and end of path with starting index
lowerCamelCase__ : Union[str, Any] =start_index
# evaluate and if we find answer return path either return empty array
return path if util_hamilton_cycle(__lowerCamelCase , __lowerCamelCase , 1 ) else []
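# Example usage (added for illustration, not in the original file): adjacency
# matrix of a small undirected graph in which 0 -> 1 -> 2 -> 4 -> 3 -> 0 is a
# Hamiltonian cycle.
if __name__ == "__main__":
    graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(graph))  # [0, 1, 2, 4, 3, 0]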
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
    import torch

if is_soundfile_availble():
    import soundfile as sf

if is_vision_available():
    from PIL import Image
def snake_case__ ( __lowerCamelCase : Tuple="" ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =tempfile.mkdtemp()
return os.path.join(__lowerCamelCase , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest( unittest.TestCase ):
    """Integration tests for the pretrained en-ro MBart tokenizer."""
    checkpoint_name = 'facebook/mbart-large-en-ro'
    src_text = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
    @classmethod
    def setUpClass( cls ):
        cls.tokenizer: MBartTokenizer =MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''' )
        cls.pad_token_id =1
        return cls
    def check_language_codes( self ):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 250001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 250004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 250020 )
    def test_enro_tokenizer_batch_encode_plus( self ):
        ids =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids )
    def test_enro_tokenizer_decode_ignores_language_codes( self ):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids )
        generated_ids =[RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result =self.tokenizer.decode(generated_ids, skip_special_tokens=True )
        expected_romanian =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True )
        self.assertEqual(result, expected_romanian )
        self.assertNotIn(self.tokenizer.eos_token, result )
    def test_enro_tokenizer_truncation( self ):
        src_text =['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0], str )
        desired_max_length =10
        ids =self.tokenizer(src_text, max_length=desired_max_length, truncation=True ).input_ids[0]
        self.assertEqual(ids[-2], 2 )
        self.assertEqual(ids[-1], EN_CODE )
        self.assertEqual(len(ids ), desired_max_length )
    def test_mask_token( self ):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [250026, 250001] )
    def test_special_tokens_unaffacted_by_save_load( self ):
        tmpdirname =tempfile.mkdtemp()
        original_special_tokens =self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok =MBartTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens )
    @require_torch
    def test_batch_fairseq_parity( self ):
        batch =self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors='''pt''' )
        batch['''decoder_input_ids'''] =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    @require_torch
    def test_enro_tokenizer_prepare_batch( self ):
        batch =self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens ), return_tensors='''pt''', )
        batch['''decoder_input_ids'''] =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
        self.assertIsInstance(batch, BatchEncoding )
        self.assertEqual((2, 14), batch.input_ids.shape )
        self.assertEqual((2, 14), batch.attention_mask.shape )
        result =batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result )
        self.assertEqual(2, batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [] )
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE] )
    def test_seq2seq_max_length( self ):
        batch =self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors='''pt''' )
        targets =self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors='''pt''' )
        labels =targets['''input_ids''']
        batch['''decoder_input_ids'''] =shift_tokens_right(labels, self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1], 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1], 10 )
    @require_torch
    def test_tokenizer_translation( self ):
        inputs =self.tokenizer._build_translation_inputs(
            '''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''' )
        self.assertEqual(
            nested_simplify(inputs ), {
                # A, test, EOS, en_XX
                '''input_ids''': [[62, 3034, 2, 250004]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 250001,
            }, )
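# Hedged usage sketch of the tokenizer exercised above (network access and the
# sentencepiece extra are assumed; the exact ids are illustrative, not asserted):
#   from transformers import MBartTokenizer
#   tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok(" UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # source sequences end with [eos, src_lang_code] (here [2, EN_CODE]), as the parity test checks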
| 625 | 0 |
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial( n : int ):
    """Return the sum of the factorials of the digits of ``n``."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n ) )
def solution():
    """Project Euler 34: sum the numbers that equal the factorial sum of their own digits."""
    limit =7 * factorial(9 ) + 1  # 7 * 9! bounds any possible sum of digit factorials
    return sum(i for i in range(3 , limit ) if sum_of_digit_factorial(i ) == i )
if __name__ == "__main__":
print(f'{solution() = }')
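# Worked example (illustrative): 145 is one of the numbers being summed, since
# 1! + 4! + 5! = 1 + 24 + 120 = 145, i.e. sum_of_digit_factorial(145) == 145.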
| 720 |
"""simple docstring"""
def reverse_long_words( sentence : str ):
    """Reverse every word longer than four characters, leaving shorter words intact.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 625 | 0 |
"""simple docstring"""
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path : str , config_file : str , pytorch_dump_path : str ):
    """Convert a TensorFlow T5 checkpoint into a PyTorch model directory."""
    config =TaConfig.from_json_file(config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model =TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
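# Hypothetical invocation (the script name and all paths are placeholders, not shipped files):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /tmp/t5/model.ckpt \
#       --config_file /tmp/t5/config.json \
#       --pytorch_dump_path /tmp/t5-pytorch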
| 721 |
"""simple docstring"""
def solution( max_base : int = 10 , max_power : int = 22 ):
    """Project Euler 63: count positive n-digit integers that are also an n-th power."""
    bases =range(1 , max_base )
    powers =range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'{solution(1_0, 2_2) = }')
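# Worked check: 16807 = 7**5 has exactly five digits, so the pair (base=7, power=5)
# is counted; with the defaults above the total comes to 49.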
| 625 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
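# Hedged usage note: with the lazy structure above, importing the package stays cheap and
# the torch-backed symbols are only materialised on first attribute access, e.g.
#   from transformers import LiltConfig, LiltModel  # assumes torch is installed
#   model = LiltModel(LiltConfig())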
| 700 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple( x ):
    """Return ``x`` unchanged if it is iterable, otherwise duplicate it into a pair."""
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_flax
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def snake_case ( self : Dict, lowerCamelCase : List[str], lowerCamelCase : Any )-> Union[str, Any]:
pass
def snake_case ( self : List[str] )-> List[str]:
pass
def snake_case ( self : Optional[Any] )-> str:
pass
    def assert_almost_equals( self, a : np.ndarray, b : np.ndarray, tol : float ):
        diff =np.abs((a - b) ).max()
        self.assertLessEqual(diff, tol, F'''Difference between torch and flax is {diff} (>= {tol}).''' )
def snake_case ( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Dict, lowerCamelCase : Any=None, **lowerCamelCase : str )-> int:
lowerCamelCase__ : List[str] =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : Dict =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], config.projection_dim) )
def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : str=None, **lowerCamelCase : List[Any] )-> int:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Union[str, Any] ={'''vision_model''': vision_model, '''text_model''': text_model}
lowerCamelCase__ : Tuple =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], model.config.projection_dim) )
def snake_case ( self : Any, lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict=None, **lowerCamelCase : int )-> List[str]:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Optional[int] ={'''vision_model''': vision_model, '''text_model''': text_model}
lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
lowerCamelCase__ : List[Any] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
lowerCamelCase__ : int =output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase )
lowerCamelCase__ : Dict =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
lowerCamelCase__ : Optional[int] =model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase )
lowerCamelCase__ : List[str] =after_output[0]
lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase, 1E-3 )
def snake_case ( self : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : List[Any]=None, **lowerCamelCase : List[Any] )-> Tuple:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.get_vision_text_model(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Any ={'''vision_model''': vision_model, '''text_model''': text_model}
lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase )
lowerCamelCase__ : List[str] =model(
input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase )
lowerCamelCase__ : int =output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase ), vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase__ : Tuple =to_atuple(vision_model.config.image_size )
lowerCamelCase__ : Optional[Any] =to_atuple(vision_model.config.patch_size )
lowerCamelCase__ : Union[str, Any] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCamelCase__ : int =num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCamelCase__ : List[Any] =output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase ), text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
def snake_case ( self : Tuple, lowerCamelCase : Optional[int], lowerCamelCase : Any, lowerCamelCase : Union[str, Any] )-> Any:
pt_model.to(lowerCamelCase )
pt_model.eval()
# prepare inputs
lowerCamelCase__ : Any =inputs_dict
lowerCamelCase__ : Any ={k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
lowerCamelCase__ : List[str] =pt_model(**lowerCamelCase ).to_tuple()
lowerCamelCase__ : Optional[Any] =fx_model(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase )
lowerCamelCase__ : Optional[int] =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase )
lowerCamelCase__ : List[Any] =fx_model_loaded(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase )
lowerCamelCase__ : str =VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase )
pt_model_loaded.to(lowerCamelCase )
pt_model_loaded.eval()
with torch.no_grad():
lowerCamelCase__ : List[Any] =pt_model_loaded(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCamelCase, pt_output_loaded.numpy(), 4E-2 )
def snake_case ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any], lowerCamelCase : str )-> List[Any]:
lowerCamelCase__ : Any =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : List[Any] =VisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : str =convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase )
lowerCamelCase__ : Tuple =fx_state
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def snake_case ( self : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any] )-> Optional[int]:
lowerCamelCase__ : Dict =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Tuple =VisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : List[str] =FlaxVisionTextDualEncoderModel(lowerCamelCase )
lowerCamelCase__ : Tuple =load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params )
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def snake_case ( self : Optional[int] )-> Union[str, Any]:
lowerCamelCase__ : Any =self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCamelCase )
def snake_case ( self : Tuple )-> int:
lowerCamelCase__ : int =self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase )
def snake_case ( self : Tuple )-> Any:
lowerCamelCase__ : Tuple =self.prepare_config_and_inputs()
self.check_save_load(**lowerCamelCase )
def snake_case ( self : str )-> Any:
lowerCamelCase__ : str =self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCamelCase )
@is_pt_flax_cross_test
def snake_case ( self : Tuple )-> List[Any]:
lowerCamelCase__ : Union[str, Any] =self.prepare_config_and_inputs()
lowerCamelCase__ : Union[str, Any] =config_inputs_dict.pop('''vision_config''' )
lowerCamelCase__ : Optional[Any] =config_inputs_dict.pop('''text_config''' )
lowerCamelCase__ : Tuple =config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase )
self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase )
@slow
def snake_case ( self : Optional[Any] )-> Tuple:
lowerCamelCase__ , lowerCamelCase__ : Dict =self.get_pretrained_model_and_inputs()
lowerCamelCase__ : Optional[int] =model_a(**lowerCamelCase )
lowerCamelCase__ : List[str] =outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCamelCase )
lowerCamelCase__ : int =FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =model_a(**lowerCamelCase )
lowerCamelCase__ : List[Any] =after_outputs[0]
lowerCamelCase__ : Any =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase, 1E-5 )
@require_flax
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : Optional[int] )-> Optional[Any]:
lowerCamelCase__ : str =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
lowerCamelCase__ : Union[str, Any] =13
lowerCamelCase__ : List[str] =floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowerCamelCase__ : List[str] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
lowerCamelCase__ : Optional[int] =random_attention_mask([batch_size, 4] )
lowerCamelCase__ : Any ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
    def get_vision_text_model( self, vision_config, text_config ):
        vision_model =FlaxViTModel(vision_config )
        text_model =FlaxBertModel(text_config )
        return vision_model, text_model
def snake_case ( self : int )-> Optional[int]:
lowerCamelCase__ : Any =FlaxViTModelTester(self )
lowerCamelCase__ : Union[str, Any] =FlaxBertModelTester(self )
lowerCamelCase__ : Any =vit_model_tester.prepare_config_and_inputs()
lowerCamelCase__ : Optional[Any] =bert_model_tester.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ : Any =vision_config_and_inputs
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple =text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : Optional[int] )-> Optional[int]:
lowerCamelCase__ : Union[str, Any] =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''', '''hf-internal-testing/tiny-bert''', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
lowerCamelCase__ : Union[str, Any] =13
lowerCamelCase__ : Optional[Any] =floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowerCamelCase__ : Union[str, Any] =ids_tensor([batch_size, 4], model.config.text_config.vocab_size )
lowerCamelCase__ : str =random_attention_mask([batch_size, 4] )
lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
    def get_vision_text_model( self, vision_config, text_config ):
        vision_model =FlaxCLIPVisionModel(vision_config )
        text_model =FlaxBertModel(text_config )
        return vision_model, text_model
def snake_case ( self : Optional[int] )-> Optional[Any]:
lowerCamelCase__ : List[Any] =FlaxCLIPVisionModelTester(self )
lowerCamelCase__ : List[Any] =FlaxBertModelTester(self )
lowerCamelCase__ : Any =clip_model_tester.prepare_config_and_inputs()
lowerCamelCase__ : Optional[int] =bert_model_tester.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ : List[Any] =vision_config_and_inputs
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case ( self : Tuple )-> Optional[Any]:
lowerCamelCase__ : Any =FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''', logit_scale_init_value=1.0 )
lowerCamelCase__ : List[Any] =VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
lowerCamelCase__ : Optional[int] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase__ : Dict =processor(
text=['''una foto di un gatto''', '''una foto di un cane'''], images=lowerCamelCase, padding=lowerCamelCase, return_tensors='''np''' )
lowerCamelCase__ : List[Any] =model(**lowerCamelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
lowerCamelCase__ : Any =np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image, lowerCamelCase, atol=1E-3 ) )
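# Hedged sketch of the API these tests exercise (tiny hub checkpoints are used purely for
# illustration; the from_pt flags mirror the tests, since those towers ship PyTorch weights):
#   from transformers import FlaxVisionTextDualEncoderModel
#   model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
#       "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-bert",
#       vision_from_pt=True, text_from_pt=True,
#   )
#   # model(input_ids=..., pixel_values=..., attention_mask=...) returns text_embeds and
#   # image_embeds of shape (batch_size, projection_dim), as asserted in the tests above.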
| 625 | 0 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body( *args , **kwargs ):
        """Stub so this module still imports when FastAPI/uvicorn are absent."""
        pass

    _serve_dependencies_installed = False
logger = logging.get_logger("transformers-cli/serving")
def serve_command_factory( args : Namespace ):
    """Factory function returning a ServeCommand built from the parsed CLI arguments."""
    nlp =pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    return ServeCommand(nlp , args.host , args.port , args.workers )
class ServeModelInfoResult( BaseModel ):
    """Expose model information."""
    infos: dict
class ServeTokenizeResult( BaseModel ):
    """Tokenize result model."""
    tokens: List[str]
    tokens_ids: Optional[List[int]]
class ServeDeTokenizeResult( BaseModel ):
    """De-tokenize result model."""
    text: str
class ServeForwardResult( BaseModel ):
    """Forward result model."""
    output: Any
class ServeCommand( BaseTransformersCLICommand ):
    """Serve a transformers pipeline over lightweight REST endpoints."""
@staticmethod
    def register_subcommand( parser : ArgumentParser ):
        serve_parser =parser.add_parser(
            '''serve''', help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
        serve_parser.add_argument(
            '''--task''', type=str, choices=get_supported_tasks(), help='''The task to run the pipeline on''', )
        serve_parser.add_argument('''--host''', type=str, default='''localhost''', help='''Interface the server will listen on.''' )
        serve_parser.add_argument('''--port''', type=int, default=8888, help='''Port the serving will listen to.''' )
        serve_parser.add_argument('''--workers''', type=int, default=1, help='''Number of http workers''' )
        serve_parser.add_argument('''--model''', type=str, help='''Model\'s name or path to stored model.''' )
        serve_parser.add_argument('''--config''', type=str, help='''Model\'s config name or path to stored model.''' )
        serve_parser.add_argument('''--tokenizer''', type=str, help='''Tokenizer name to use.''' )
        serve_parser.add_argument(
            '''--device''', type=int, default=-1, help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''', )
        serve_parser.set_defaults(func=serve_command_factory )
    def __init__( self, pipeline : Pipeline, host : str, port : int, workers : int ):
        self._pipeline =pipeline
        self.host =host
        self.port =port
        self.workers =workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                '''Using serve command requires FastAPI and uvicorn. '''
                '''Please install transformers with [serving]: pip install "transformers[serving]".'''
                '''Or install FastAPI and uvicorn separately.''' )
        else:
            logger.info(F'''Serving model over {host}:{port}''' )
            self._app =FastAPI(
                routes=[
                    APIRoute(
                        '''/''', self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=['''GET'''], ),
                    APIRoute(
                        '''/tokenize''', self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=['''POST'''], ),
                    APIRoute(
                        '''/detokenize''', self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=['''POST'''], ),
                    APIRoute(
                        '''/forward''', self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=['''POST'''], ),
                ], timeout=600, )
    def run( self ):
        run(self._app, host=self.host, port=self.port, workers=self.workers )
    def model_info( self ):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
    def tokenize( self, text_input : str = Body(None, embed=True ), return_ids : bool = Body(False, embed=True ) ):
        try:
            tokens_txt =self._pipeline.tokenizer.tokenize(text_input )
            if return_ids:
                tokens_ids =self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt )
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids )
            else:
                return ServeTokenizeResult(tokens=tokens_txt )
        except Exception as e:
            raise HTTPException(status_code=500, detail={'''model''': '''''', '''error''': str(e )} )
    def detokenize( self, tokens_ids : List[int] = Body(None, embed=True ), skip_special_tokens : bool = Body(False, embed=True ), cleanup_tokenization_spaces : bool = Body(True, embed=True ), ):
        try:
            decoded_str =self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces )
            return ServeDeTokenizeResult(model='''''', text=decoded_str )
        except Exception as e:
            raise HTTPException(status_code=500, detail={'''model''': '''''', '''error''': str(e )} )
    async def forward( self, inputs=Body(None, embed=True ) ):
        # Check we don't have empty string
        if len(inputs ) == 0:
            return ServeForwardResult(output=[], attention=[] )
        try:
            # Forward through the model
            output =self._pipeline(inputs )
            return ServeForwardResult(output=output )
        except Exception as e:
            raise HTTPException(500, {'''error''': str(e )} )
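# Hedged usage sketch (requires the optional extras: pip install "transformers[serving]"):
#   transformers-cli serve --task sentiment-analysis --host localhost --port 8888
# then, from another shell, hit the model-info route registered above:
#   curl http://localhost:8888/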
| 701 |
"""simple docstring"""
def knapsack( weights : list , values : list , number_of_items : int , max_weight : int , index : int ) -> int:
    """Recursive 0/1 knapsack: best value achievable with items from ``index`` onward.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    """
    if index == number_of_items:
        return 0
    # best value if the current item is skipped
    ans1 =knapsack(weights , values , number_of_items , max_weight , index + 1 )
    ans2 =0
    if weights[index] <= max_weight:
        # best value if the current item is taken (it fits in the remaining capacity)
        ans2 =values[index] + knapsack(
            weights , values , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans1 , ans2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
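# Design note: the recursion above revisits (index, capacity) states and is exponential
# in the worst case. A common refinement -- sketched here as an addition, not part of the
# original file -- memoises those states:
from functools import lru_cache

def knapsack_memoised(weights: list, values: list, max_weight: int) -> int:
    """Memoised variant of the recursion above; O(len(weights) * max_weight) states."""

    @lru_cache(maxsize=None)
    def best(index: int, capacity: int) -> int:
        if index == len(weights):
            return 0
        skip = best(index + 1, capacity)  # leave item ``index`` behind
        take = 0
        if weights[index] <= capacity:
            take = values[index] + best(index + 1, capacity - weights[index])
        return max(skip, take)

    return best(0, max_weight)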
| 625 | 0 |
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger =logging.get_logger(__name__)
def get_maskformer_config( model_name : str ):
    """Build a MaskFormerConfig with a Swin-Tiny backbone and the id2label map for ``model_name``."""
    backbone_config =SwinConfig.from_pretrained(
        '''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
    config =MaskFormerConfig(backbone_config=backbone_config )
    repo_id ='''huggingface/label-files'''
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels =847
        filename ='''maskformer-ade20k-full-id2label.json'''
    elif "ade" in model_name:
        # this should be ok
        config.num_labels =150
        filename ='''ade20k-id2label.json'''
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels =171
        filename ='''maskformer-coco-stuff-id2label.json'''
    elif "coco" in model_name:
        # TODO
        config.num_labels =133
        filename ='''coco-panoptic-id2label.json'''
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels =19
        filename ='''cityscapes-id2label.json'''
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels =65
        filename ='''mapillary-vistas-id2label.json'''
    idalabel =json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel ={int(k ): v for k, v in idalabel.items()}
    return config
def create_rename_keys( config ):
    """List (old, new) pairs mapping original MaskFormer state-dict keys to the HF names."""
    rename_keys =[]
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    """Pop ``dct[old]`` and re-insert its value under ``new``."""
    val =dct.pop(old )
    dct[new] =val
def read_in_swin_q_k_v( state_dict , backbone_config ):
    """Split each fused Swin qkv projection into separate query/key/value tensors."""
    num_features =[int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim =num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight =state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' )
            in_proj_bias =state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] =in_proj_weight[:dim, :]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] =in_proj_bias[:dim]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] =in_proj_weight[dim : dim * 2, :]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] =in_proj_bias[dim : dim * 2]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] =in_proj_weight[-dim :, :]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] =in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v( state_dict , config ):
    """Split the decoder's fused in-projections into q/k/v for self- and cross-attention."""
    # fmt: off
    hidden_size =config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight =state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' )
        in_proj_bias =state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'''] =in_proj_weight[: hidden_size, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'''] =in_proj_bias[:hidden_size]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'''] =in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'''] =in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'''] =in_proj_weight[-hidden_size :, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'''] =in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight =state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' )
        in_proj_bias =state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'''] =in_proj_weight[: hidden_size, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'''] =in_proj_bias[:hidden_size]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'''] =in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'''] =in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'''] =in_proj_weight[-hidden_size :, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'''] =in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img():
    """Download the standard COCO cats image used to verify the conversion."""
    url ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im =Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint( model_name : str , checkpoint_path : str , pytorch_dump_folder_path : str , push_to_hub : bool = False ):
    """Copy, rename and reshape the original MaskFormer weights into the HF implementation."""
    config =get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path , '''rb''' ) as f:
        data =pickle.load(f )
    state_dict =data['''model''']
    # for name, param in state_dict.items():
    # print(name, param.shape)
    # rename keys
    rename_keys =create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] =torch.from_numpy(value )
    # load 🤗 model
    model =MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name , param.shape )
    missing_keys, unexpected_keys =model.load_state_dict(state_dict , strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, f'''Unexpected keys: {unexpected_keys}'''
    # verify results
    image =prepare_img()
    if "vistas" in model_name:
        ignore_index =65
    elif "cityscapes" in model_name:
        ignore_index =65535
    else:
        ignore_index =255
    reduce_labels =True if '''ade''' in model_name else False
    image_processor =MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs =image_processor(image , return_tensors='''pt''' )
    outputs =model(**inputs )
    print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits =torch.tensor(
            [[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing model and image processor to the hub...''' )
        model.push_to_hub(f'''nielsr/{model_name}''' )
        image_processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you'd like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args =parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
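# Hypothetical invocation (script name and checkpoint path are placeholders):
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path /path/to/output \
#       --push_to_hub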
| 702 |
"""simple docstring"""
deps = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
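# Hedged note on how a pin table like this is consumed (utility names follow the upstream
# transformers helpers; treat them as assumptions here):
#   from transformers.utils.versions import require_version
#   require_version(deps["tqdm"])  # raises if the installed tqdm violates ">=4.27"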
| 625 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_mobilenet_v2": [
"MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileNetV2Config",
"MobileNetV2OnnxConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = ["MobileNetV2FeatureExtractor"]
_lowercase : Any = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
"MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileNetV2ForImageClassification",
"MobileNetV2ForSemanticSegmentation",
"MobileNetV2Model",
"MobileNetV2PreTrainedModel",
"load_tf_weights_in_mobilenet_v2",
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703 |
"""simple docstring"""
def max_product_subarray( numbers : list[int] ) -> int:
    """Return the maximum product over all contiguous subarrays of ``numbers``."""
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError('''numbers must be an iterable of integers''' )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products ending at position i
        number = numbers[i]
        if number < 0:
            # a negative factor swaps the roles of the running max and min
            max_till_now , min_till_now = min_till_now , max_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
    return max_prod
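# Illustrative checks: for [2, 3, -2, 4] the best contiguous product is 2 * 3 = 6,
# and for [-2, 0, -1] it is 0 (the sign swap keeps the running min/max correct):
#   >>> max_product_subarray([2, 3, -2, 4])
#   6
#   >>> max_product_subarray([-2, 0, -1])
#   0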
| 625 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    '''simple docstring'''
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    '''simple docstring'''
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv2)
        self.blocks = blocks
        # zero-initialized so the conditioning branch starts as a no-op and is learned gradually
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
    def __call__(self, conditioning: jnp.ndarray) -> jnp.ndarray:
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    '''simple docstring'''
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool, ...]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int, ...]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
        controlnet_down_blocks.append(controlnet_block)
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
            down_blocks.append(down_block)
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(controlnet_block)
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(controlnet_block)
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks
        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
    def __call__(self, sample: jnp.ndarray, timesteps: Union[jnp.ndarray, float, int], encoder_hidden_states: jnp.ndarray, controlnet_cond: jnp.ndarray, conditioning_scale: float = 1.0, return_dict: bool = True, train: bool = False, ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)
        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples
        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample)
        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample)
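# --- Usage sketch (editor's addition; the shrunken hyperparameters are illustrative
# and not taken from any released checkpoint) ---
# import jax
# controlnet = FlaxControlNetModel(
#     sample_size=16, block_out_channels=(32, 64), layers_per_block=1,
#     attention_head_dim=2, cross_attention_dim=32,
#     down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
# )
# params = controlnet.init_weights(jax.random.PRNGKey(0))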
| 625 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'latents',
        'num_images_per_prompt',
        'callback',
        'callback_steps',
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self) -> dict:
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn='''gelu-approximate''', num_embeds_ada_norm=1000, norm_type='''ada_norm_zero''', norm_elementwise_affine=False, )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0) -> dict:
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''class_labels''': [1],
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_inference(self) -> None:
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1E-3)
    def test_inference_batch_single_identical(self) -> None:
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1E-3)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
    def test_xformers_attention_forwardGenerator_pass(self) -> None:
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''
    def tearDown(self) -> None:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256(self) -> None:
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''')
        pipe.to('''cuda''')
        words = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
        class_ids = pipe.get_label_ids(words)
        images = pipe(class_ids, generator=generator, num_inference_steps=40, output_type='''np''').images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''')
            assert np.abs((expected_image - image).max()) < 1E-2
    def test_dit_512(self) -> None:
        pipe = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to('''cuda''')
        words = ['''vase''', '''umbrella''']
        class_ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(class_ids, generator=generator, num_inference_steps=25, output_type='''np''').images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
                F'''/dit/{word}_512.npy''')
            assert np.abs((expected_image - image).max()) < 1E-1
| 705 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ["CLIPFeatureExtractor"]
_lowercase : int = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 625 | 0 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Create train and eval dataloaders for the GLUE MRPC task."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('''glue''', '''mrpc''')
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='''max_length''', max_length=128, return_tensors='''pt''')
        return tokenizer.pad(examples, padding='''longest''', return_tensors='''pt''')
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
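# --- Padding note (editor's addition) ---
# With padding="longest", each batch is padded only to that batch's longest
# sequence: token lists of lengths [7, 12, 9] collate to a (3, 12) tensor.
# The TPU branch instead pads every batch to a fixed (batch, 128), trading some
# wasted compute for the static shapes that avoid XLA recompilation.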
def training_function(config, args):
    """Train and evaluate the model, tracking the best MRPC accuracy per epoch."""
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load('''glue''', '''mrpc''')
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['''labels''']))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''', eval_metric)
        performance_metric[f'''epoch-{epoch}'''] = eval_metric['''accuracy''']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['''accuracy''']
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, '''all_results.json'''), '''w''') as f:
            json.dump(performance_metric, f)
def main():
    """Parse CLI arguments and launch training."""
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''')
    parser.add_argument(
        '''--model_name_or_path''', type=str, default='''bert-base-cased''', help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=False, )
    parser.add_argument(
        '''--output_dir''', type=str, default='''.''', help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''', )
    parser.add_argument(
        '''--performance_lower_bound''', type=float, default=None, help='''Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.''', )
    parser.add_argument(
        '''--num_epochs''', type=int, default=3, help='''Number of train epochs.''', )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
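# --- Invocation sketch (editor's addition; the script filename and DeepSpeed config
# path are hypothetical) ---
# accelerate launch --config_file deepspeed_config.yaml test_performance.py \
#     --model_name_or_path bert-base-cased --output_dir ./results \
#     --num_epochs 3 --performance_lower_bound 0.80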
| 706 |
"""simple docstring"""
import os
def solution():
    """Sum the alphabetical-value scores of all names, weighted by sorted position."""
    with open(os.path.dirname(__file__) + '''/p022_names.txt''') as file:
        names = str(file.readlines()[0])
    names = names.replace('''"''', '''''').split(''',''')
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
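# --- Worked example (editor's addition, taken from the Project Euler 22 statement) ---
# "COLIN" has alphabetical value 3 + 15 + 12 + 9 + 14 = 53; as the 938th name in
# the sorted list it contributes 938 * 53 = 49714 to the total.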
| 625 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n    >>> from diffusers.utils import load_image\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior.to(\"cuda\")\n\n    >>> prompt = \"A red cartoon frog, 4k\"\n    >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n    >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n    ...     \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n    ... )\n    >>> pipe.to(\"cuda\")\n\n    >>> init_image = load_image(\n    ...     \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n    ...     \"/kandinsky/frog.png\"\n    ... )\n\n    >>> image = pipe(\n    ...     image=init_image,\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=100,\n    ...     strength=0.2,\n    ... ).images\n\n    >>> image[0].save(\"red_frog.png\")\n    ```\n"
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    """Round ``height`` and ``width`` up to sizes compatible with the movq scale factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
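# --- Worked example (editor's addition) ---
# downscale_height_and_width(515, 770, scale_factor=8):
#   515 // 64 = 8 with a remainder, so new_height = 9  ->  9 * 8 = 72
#   770 // 64 = 12 with a remainder, so new_width = 13 -> 13 * 8 = 104
# i.e. each dimension is divided by scale_factor**2, rounded up, then multiplied
# back by scale_factor to give the size the latents will have.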
def prepare_image(pil_image, w=512, h=512):
    """Resize a PIL image and convert it to a normalized (1, 3, h, w) tensor in [-1, 1]."""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert('''RGB'''))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    '''simple docstring'''
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, ) -> None:
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}''')
        image = image.to(device=device, dtype=dtype)
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    F'''You have passed a list of generators of length {len(generator)}, but requested an effective batch'''
                    F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''')
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0) -> None:
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''')
        device = torch.device(F'''cuda:{gpu_id}''')
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0) -> None:
        if is_accelerate_available() and is_accelerate_version('''>=''', '''0.17.0.dev0'''):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')
        device = torch.device(F'''cuda:{gpu_id}''')
        if self.device.type != "cpu":
            self.to('''cpu''', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '''_hf_hook'''):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '''_hf_hook''')
                and hasattr(module._hf_hook, '''execution_device''')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, strength: float = 0.3, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                F'''Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor''')
        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)
        latents = self.movq.encode(image)['''latents''']
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'''image_embeds''': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, '''variance_type''')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''')
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 707 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
    '''simple docstring'''
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class BinaryTreeNodeSum:
    '''simple docstring'''
    def __init__(self, tree: Node) -> None:
        self.tree = tree
    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )
    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
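# --- Usage sketch (editor's addition) ---
# tree = Node(10)
# tree.left = Node(5)
# tree.right = Node(-3)
# print(next(iter(BinaryTreeNodeSum(tree))))  # 10 + 5 + (-3) = 12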
| 625 | 0 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score ``item`` by how many characters match ``main_target`` position by position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
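# --- Worked example (editor's addition) ---
# evaluate("car", "cat") matches at positions 0 and 1 ("c", "a"), so it
# returns ("car", 2.0).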
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
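# --- Worked example (editor's addition) ---
# With random_slice == 2, crossover("abcdef", "uvwxyz") yields
# ("abwxyz", "uvcdef"): each child keeps one parent's head and the other's tail.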
def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of ``child``."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str], ) -> list[str]:
    """Breed children from ``parent_1`` and random mates drawn from the scored population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until ``target`` is reproduced; return (generation, total population, best string)."""
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'''\nGeneration: {generation}'''
                f'''\nTotal Population:{total_population}'''
                f'''\nBest score: {population_score[0][1]}'''
                f'''\nBest string: {population_score[0][0]}''')
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
    )
| 708 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    """Save the model to ``dirpath``, clearing any stale checkpoint files first."""
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, '''config.json''')) and os.path.isfile(
            os.path.join(dirpath, '''config.json''')):
            os.remove(os.path.join(dirpath, '''config.json'''))
        if os.path.exists(os.path.join(dirpath, '''pytorch_model.bin''')) and os.path.isfile(
            os.path.join(dirpath, '''pytorch_model.bin''')):
            os.remove(os.path.join(dirpath, '''pytorch_model.bin'''))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last dimension."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
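# --- Worked example (editor's addition) ---
# For a uniform attention row p = (1/n, ..., 1/n), H(p) = -sum_i p_i * log(p_i) = log(n):
# entropy(torch.full((4,), 0.25)) -> log(4) ~= 1.3863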
def print_ad_tensor(tensor):
    """Log a 2D tensor, one layer per row."""
    logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data))
        else:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data))
def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False):
    """Accumulate attention entropy and gradient-based head importance over the eval data."""
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc='''Iteration''', disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('''Attention entropies''')
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info('''Head importance scores''')
        print_ad_tensor(head_importance)
    logger.info('''Head ranked by importance scores''')
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device)
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
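# --- Note (editor's addition) ---
# The importance accumulated above is the absolute gradient of the loss with
# respect to each head's mask entry, summed over the evaluation batches:
#     I_h = sum_batches |dL / d m_h|
# a first-order estimate of how much the loss changes if head h is switched off,
# in the spirit of Michel et al. (2019), "Are Sixteen Heads Really Better than One?".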
def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least important heads while the score stays above the threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info('''Pruning: original score: %f, threshold: %f''', original_score, original_score * args.masking_threshold)
    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('''Inf''')
        current_heads_to_mask = head_importance.view(-1).sort()[1]
        if len(current_heads_to_mask) <= num_to_mask:
            print('''BREAK BY num_to_mask''')
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('''Heads to mask: %s''', str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask)
        current_score = 1 / loss
        logger.info(
            '''Masking: current score: %f, remaining heads %d (%.1f percents)''', current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, )
    logger.info('''Final head mask''')
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, '''head_mask.npy'''), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually prune the heads selected by ``head_mask`` and compare score and speed."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask)
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True, )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''', original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100, )
    logger.info('''Pruning: score with masking: %f score with pruning: %f''', score_masking, score_pruning)
    logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''', original_time / new_time * 100)
    save_model(model, args.output_dir)
def snake_case__ ( ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=__lowerCamelCase , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=__lowerCamelCase , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=__lowerCamelCase , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
        '''--masking_threshold''' , default=0.9 , type=__lowerCamelCase , help='''masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
        '''--masking_amount''' , default=0.1 , type=__lowerCamelCase , help='''Amount of heads to mask at each masking step.''' )
parser.add_argument('''--metric_name''' , default='''acc''' , type=__lowerCamelCase , help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__lowerCamelCase , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=__lowerCamelCase , help='''Batch size.''' )
parser.add_argument('''--seed''' , type=__lowerCamelCase , default=42 )
parser.add_argument('''--local_rank''' , type=__lowerCamelCase , default=-1 , help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
lowerCamelCase__ : List[Any] =parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCamelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
lowerCamelCase__ : Dict =torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
lowerCamelCase__ : Dict =0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
lowerCamelCase__ : str =torch.device('''cuda''' , args.local_rank )
lowerCamelCase__ : Any =1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
lowerCamelCase__ : Union[str, Any] =GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
lowerCamelCase__ : List[Any] =nn.parallel.DistributedDataParallel(
__lowerCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowerCamelCase )
elif args.n_gpu > 1:
lowerCamelCase__ : int =nn.DataParallel(__lowerCamelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=__lowerCamelCase )
    torch.save(args , os.path.join(args.output_dir , '''run_args.bin''' ) )
    logger.info('''Training/evaluation parameters %s''' , args )
# Prepare dataset
lowerCamelCase__ : Union[str, Any] =np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
lowerCamelCase__ : Any =(torch.from_numpy(__lowerCamelCase ),)
lowerCamelCase__ : List[Any] =TensorDataset(*__lowerCamelCase )
lowerCamelCase__ : List[str] =RandomSampler(__lowerCamelCase )
lowerCamelCase__ : Dict =DataLoader(__lowerCamelCase , sampler=__lowerCamelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
lowerCamelCase__ : Optional[int] =mask_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
prune_heads(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
| 625 | 0 |
"""simple docstring"""
from copy import deepcopy
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str], lowerCamelCase : list[int] | None = None, lowerCamelCase : int | None = None )-> None:
if arr is None and size is not None:
lowerCamelCase__ : Any =size
lowerCamelCase__ : Optional[int] =[0] * size
elif arr is not None:
self.init(lowerCamelCase )
else:
raise ValueError('''Either arr or size must be specified''' )
def snake_case ( self : Union[str, Any], lowerCamelCase : list[int] )-> None:
lowerCamelCase__ : Tuple =len(lowerCamelCase )
lowerCamelCase__ : int =deepcopy(lowerCamelCase )
for i in range(1, self.size ):
lowerCamelCase__ : Dict =self.next_(lowerCamelCase )
if j < self.size:
self.tree[j] += self.tree[i]
def snake_case ( self : Optional[int] )-> list[int]:
lowerCamelCase__ : List[str] =self.tree[:]
for i in range(self.size - 1, 0, -1 ):
lowerCamelCase__ : Optional[Any] =self.next_(lowerCamelCase )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def snake_case ( lowerCamelCase : int )-> int:
return index + (index & (-index))
@staticmethod
def snake_case ( lowerCamelCase : int )-> int:
return index - (index & (-index))
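    # Added comment: index & (-index) isolates the lowest set bit, so next_
    # jumps to the next node whose range covers this index and prev steps back
    # over the range this node is responsible for -- the standard Fenwick
    # (binary indexed tree) index arithmetic.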
def snake_case ( self : Any, lowerCamelCase : int, lowerCamelCase : int )-> None:
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
lowerCamelCase__ : List[str] =self.next_(lowerCamelCase )
def snake_case ( self : List[str], lowerCamelCase : int, lowerCamelCase : int )-> None:
self.add(lowerCamelCase, value - self.get(lowerCamelCase ) )
def snake_case ( self : Union[str, Any], lowerCamelCase : int )-> int:
if right == 0:
return 0
lowerCamelCase__ : Dict =self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
lowerCamelCase__ : Union[str, Any] =self.prev(lowerCamelCase )
return result
def snake_case ( self : str, lowerCamelCase : int, lowerCamelCase : int )-> int:
return self.prefix(lowerCamelCase ) - self.prefix(lowerCamelCase )
def snake_case ( self : List[str], lowerCamelCase : int )-> int:
return self.query(lowerCamelCase, index + 1 )
def snake_case ( self : List[Any], lowerCamelCase : int )-> int:
value -= self.tree[0]
if value < 0:
return -1
lowerCamelCase__ : Optional[int] =1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
lowerCamelCase__ : Union[str, Any] =0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
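# Hedged usage sketch (added; assumes a de-mangled version of the class above,
# here called FenwickTree, exposing the conventional BIT methods defined in it):
#
#     tree = FenwickTree(arr=[1, 2, 3, 4, 5])
#     tree.add(0, 10)                  # underlying array becomes [11, 2, 3, 4, 5]
#     assert tree.prefix(3) == 16      # 11 + 2 + 3
#     assert tree.query(1, 4) == 9     # 2 + 3 + 4
#     assert tree.rank_query(15) == 1  # largest i with prefix(i + 1) <= 15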
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
"""simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Tuple ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =AutoConfig.from_pretrained(__lowerCamelCase )
lowerCamelCase__ : Any =FlaxAutoModelForSeqaSeqLM.from_config(config=__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =checkpoints.load_tax_checkpoint(__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] ='''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
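    # Added comment: "wi_0" is present when the checkpoint uses the gated
    # activation of T5 v1.1 / LongT5, where the feed-forward input projection
    # is split into two matrices (wi_0 and wi_1) instead of a single wi.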
if config.model_type == "t5":
lowerCamelCase__ : List[str] ='''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowerCamelCase__ : List[Any] ='''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase__ : Optional[Any] ='''TransientGlobalSelfAttention'''
else:
raise ValueError(
            '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5\'` with `encoder_attention_type`'''
            ''' attribute with a value from [\'local\', \'transient-global\'].''' )
# Encoder
for layer_index in range(config.num_layers ):
        lowerCamelCase__ : List[Any] =f'''layers_{str(layer_index )}'''
# Self-Attention
lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase__ : str =tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
lowerCamelCase__ : List[Any] =tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
lowerCamelCase__ : Dict =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
lowerCamelCase__ : List[str] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
lowerCamelCase__ : Tuple =tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
        lowerCamelCase__ : str =flax_model.params['''encoder''']['''block'''][str(layer_index )]['''layer''']
lowerCamelCase__ : int =tax_attention_key
lowerCamelCase__ : Optional[int] =tax_attention_out
lowerCamelCase__ : List[Any] =tax_attention_query
lowerCamelCase__ : Optional[Any] =tax_attention_value
lowerCamelCase__ : List[str] =tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase__ : Optional[int] =tax_global_layer_norm
if split_mlp_wi:
lowerCamelCase__ : Optional[int] =tax_mlp_wi_a
lowerCamelCase__ : Optional[int] =tax_mlp_wi_a
else:
lowerCamelCase__ : Union[str, Any] =tax_mlp_wi
lowerCamelCase__ : str =tax_mlp_wo
lowerCamelCase__ : Optional[Any] =tax_mlp_layer_norm
lowerCamelCase__ : Optional[int] =flax_model_encoder_layer_block
# Only for layer 0:
lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
lowerCamelCase__ : str =tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase__ : Optional[int] =tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
lowerCamelCase__ : Optional[int] =tax_encoder_global_rel_embedding
# Assigning
lowerCamelCase__ : int =tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
lowerCamelCase__ : List[Any] =tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
        lowerCamelCase__ : Dict =f'''layers_{str(layer_index )}'''
# Self-Attention
lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
lowerCamelCase__ : Optional[int] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
lowerCamelCase__ : int =tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
lowerCamelCase__ : List[Any] =tax_enc_dec_attention_module['''key''']['''kernel''']
lowerCamelCase__ : Any =tax_enc_dec_attention_module['''out''']['''kernel''']
lowerCamelCase__ : Dict =tax_enc_dec_attention_module['''query''']['''kernel''']
lowerCamelCase__ : List[str] =tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
lowerCamelCase__ : Dict =tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
lowerCamelCase__ : Any =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
lowerCamelCase__ : List[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
lowerCamelCase__ : Optional[Any] =tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
lowerCamelCase__ : str =tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
        lowerCamelCase__ : str =flax_model.params['''decoder''']['''block'''][str(layer_index )]['''layer''']
lowerCamelCase__ : Union[str, Any] =tax_attention_key
lowerCamelCase__ : str =tax_attention_out
lowerCamelCase__ : Optional[int] =tax_attention_query
lowerCamelCase__ : Dict =tax_attention_value
lowerCamelCase__ : List[str] =tax_pre_attention_layer_norm
lowerCamelCase__ : List[Any] =tax_enc_dec_attention_key
lowerCamelCase__ : Any =tax_enc_dec_attention_out
lowerCamelCase__ : Any =tax_enc_dec_attention_query
lowerCamelCase__ : Optional[int] =tax_enc_dec_attention_value
lowerCamelCase__ : Dict =tax_cross_layer_norm
if split_mlp_wi:
lowerCamelCase__ : Tuple =tax_mlp_wi_a
lowerCamelCase__ : int =tax_mlp_wi_a
else:
lowerCamelCase__ : List[Any] =tax_mlp_wi
lowerCamelCase__ : Dict =tax_mlp_wo
lowerCamelCase__ : Tuple =txa_mlp_layer_norm
lowerCamelCase__ : Optional[Any] =flax_model_decoder_layer_block
# Decoder Normalization
lowerCamelCase__ : Dict =tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
lowerCamelCase__ : int =txa_decoder_norm
# Only for layer 0:
lowerCamelCase__ : Tuple =tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
lowerCamelCase__ : Tuple =tax_decoder_rel_embedding
# Token Embeddings
lowerCamelCase__ : Union[str, Any] =tax_model['''target''']['''token_embedder''']['''embedding''']
lowerCamelCase__ : Dict =txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowerCamelCase__ : int =tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(__lowerCamelCase )
    print('''T5X Model was successfully converted!''' )
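# Hedged usage sketch (added; the script filename and config identifier are
# illustrative, the flags are the ones defined in the parser below):
#
#     python convert_t5x_checkpoint_to_flax.py \
#         --t5x_checkpoint_path /path/to/t5x/checkpoint \
#         --config_name google/long-t5-local-base \
#         --flax_dump_folder_path ./flax_model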
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
)
parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
parser.add_argument(
"--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
)
_lowercase : List[Any] = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 625 | 0 |
"""simple docstring"""
from __future__ import annotations
def snake_case__ ( __lowerCamelCase : list[int] , __lowerCamelCase : int ):
"""simple docstring"""
if len(__lowerCamelCase ) < k or k < 0:
raise ValueError('''Invalid Input''' )
lowerCamelCase__ : Dict =sum(array[:k] )
for i in range(len(__lowerCamelCase ) - k ):
lowerCamelCase__ : str =current_sum - array[i] + array[i + k]
lowerCamelCase__ : Optional[int] =max(__lowerCamelCase , __lowerCamelCase )
return max_sum
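# Worked example (added, illustrative): for array = [1, 4, 2, 10, 2] and k = 2
# the window sums are 5, 6, 12, 12, so the function returns 12; each loop step
# replaces the element leaving the window with the one entering it in O(1),
# giving O(n) overall instead of recomputing every window from scratch.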
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
_lowercase : Dict = [randint(-1_0_0_0, 1_0_0_0) for i in range(1_0_0)]
_lowercase : Dict = randint(0, 1_1_0)
print(f'The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}')
| 710 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : List[str]=13, lowerCamelCase : List[Any]=32, lowerCamelCase : Dict=3, lowerCamelCase : int=4, lowerCamelCase : str=[10, 20, 30, 40], lowerCamelCase : Any=[2, 2, 3, 2], lowerCamelCase : int=True, lowerCamelCase : int=True, lowerCamelCase : str=37, lowerCamelCase : Optional[int]="gelu", lowerCamelCase : Optional[int]=10, lowerCamelCase : Any=0.02, lowerCamelCase : Union[str, Any]=["stage2", "stage3", "stage4"], lowerCamelCase : Optional[int]=3, lowerCamelCase : Tuple=None, )-> List[str]:
lowerCamelCase__ : List[str] =parent
lowerCamelCase__ : Tuple =batch_size
lowerCamelCase__ : str =image_size
lowerCamelCase__ : Any =num_channels
lowerCamelCase__ : Tuple =num_stages
lowerCamelCase__ : List[str] =hidden_sizes
lowerCamelCase__ : Any =depths
lowerCamelCase__ : Union[str, Any] =is_training
lowerCamelCase__ : Tuple =use_labels
lowerCamelCase__ : int =intermediate_size
lowerCamelCase__ : Optional[int] =hidden_act
lowerCamelCase__ : Dict =type_sequence_label_size
lowerCamelCase__ : Tuple =initializer_range
lowerCamelCase__ : Any =out_features
lowerCamelCase__ : Tuple =num_labels
lowerCamelCase__ : Optional[int] =scope
lowerCamelCase__ : Optional[int] =num_stages
def snake_case ( self : str )-> Optional[int]:
lowerCamelCase__ : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple =None
if self.use_labels:
lowerCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : int =self.get_config()
return config, pixel_values, labels
def snake_case ( self : Union[str, Any] )-> Any:
return ConvNextConfig(
num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
def snake_case ( self : Union[str, Any] )-> Any:
return UperNetConfig(
backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=lowerCamelCase, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=lowerCamelCase, loss_ignore_index=255, num_labels=self.num_labels, )
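        # Added comment: pool_scales=[1, 2, 3, 6] are the PSPNet-style pyramid
        # pooling window sizes used by the UperNet head on the last feature map.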
def snake_case ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : List[Any] )-> Tuple:
lowerCamelCase__ : List[str] =UperNetForSemanticSegmentation(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCamelCase__ : int =model(lowerCamelCase )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def snake_case ( self : Any )-> Tuple:
lowerCamelCase__ : Dict =self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any =config_and_inputs
lowerCamelCase__ : Optional[int] ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_a = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
_a = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
_a = False
_a = False
_a = False
_a = False
_a = False
_a = False
def snake_case ( self : Optional[int] )-> Optional[int]:
lowerCamelCase__ : Optional[Any] =UperNetModelTester(self )
lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 )
def snake_case ( self : Optional[int] )-> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self : List[str] )-> Dict:
return
def snake_case ( self : Optional[int] )-> List[str]:
lowerCamelCase__ , lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
lowerCamelCase__ : Tuple =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Tuple =[*signature.parameters.keys()]
lowerCamelCase__ : List[Any] =['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCamelCase )
def snake_case ( self : Any )-> Union[str, Any]:
lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def snake_case ( self : Optional[Any] )-> List[Any]:
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def snake_case ( self : Any )-> List[str]:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def snake_case ( self : int )-> Any:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def snake_case ( self : Dict )-> str:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def snake_case ( self : List[Any] )-> List[str]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case ( self : Tuple )-> str:
pass
def snake_case ( self : Optional[int] )-> List[str]:
def check_hidden_states_output(lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : List[str] ):
lowerCamelCase__ : Union[str, Any] =model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Optional[Any] =model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowerCamelCase__ : Optional[Any] =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase__ : List[str] =self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] =True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : Optional[Any] =True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def snake_case ( self : Any )-> List[Any]:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : str =_config_zero_init(lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =_config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] =model_class(config=lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def snake_case ( self : Any )-> str:
pass
@slow
def snake_case ( self : int )-> Union[str, Any]:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : str =UperNetForSemanticSegmentation.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def snake_case__ ( ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
lowerCamelCase__ : List[str] =Image.open(__lowerCamelCase ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : str )-> Union[str, Any]:
lowerCamelCase__ : List[Any] =AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
lowerCamelCase__ : List[Any] =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(lowerCamelCase )
lowerCamelCase__ : List[Any] =prepare_img()
lowerCamelCase__ : List[Any] =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
with torch.no_grad():
lowerCamelCase__ : List[Any] =model(**lowerCamelCase )
lowerCamelCase__ : Optional[int] =torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, lowerCamelCase )
lowerCamelCase__ : Dict =torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )
def snake_case ( self : Optional[int] )-> Optional[Any]:
lowerCamelCase__ : str =AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
lowerCamelCase__ : Tuple =UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(lowerCamelCase )
lowerCamelCase__ : Dict =prepare_img()
lowerCamelCase__ : Any =processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
with torch.no_grad():
lowerCamelCase__ : Any =model(**lowerCamelCase )
lowerCamelCase__ : Dict =torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, lowerCamelCase )
lowerCamelCase__ : List[str] =torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], lowerCamelCase, atol=1E-4 ) )
| 625 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
_lowercase : Optional[int] = logging.get_logger(__name__)
_lowercase : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
_lowercase : str = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
_lowercase : str = {
"allenai/led-base-16384": 1_6_3_8_4,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def snake_case__ ( ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =(
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
lowerCamelCase__ : Optional[int] =bs[:]
lowerCamelCase__ : Dict =0
for b in range(2**8 ):
if b not in bs:
            bs.append(b )
cs.append(2**8 + n )
n += 1
    lowerCamelCase__ : str =[chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
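# Added comment: the mapping above gives every byte 0..255 a printable unicode
# character; bytes outside the printable ranges are shifted past 255, which is
# why a leading space becomes the familiar "Ġ" (chr(288)) in GPT-2-style BPE
# vocabularies.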
def snake_case__ ( __lowerCamelCase : int ):
"""simple docstring"""
lowerCamelCase__ : Dict =set()
lowerCamelCase__ : Optional[Any] =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase__ : str =char
return pairs
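# Illustrative example (added): get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} -- the set of adjacent
# symbol pairs from which BPE picks the lowest-ranked merge next.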
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ['input_ids', 'attention_mask']
def __init__( self : str, lowerCamelCase : List[Any], lowerCamelCase : int, lowerCamelCase : Optional[Any]="replace", lowerCamelCase : List[str]="<s>", lowerCamelCase : Union[str, Any]="</s>", lowerCamelCase : List[Any]="</s>", lowerCamelCase : Tuple="<s>", lowerCamelCase : int="<unk>", lowerCamelCase : Optional[int]="<pad>", lowerCamelCase : Tuple="<mask>", lowerCamelCase : Tuple=False, **lowerCamelCase : Optional[int], )-> Optional[Any]:
lowerCamelCase__ : Optional[Any] =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else bos_token
lowerCamelCase__ : str =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else eos_token
lowerCamelCase__ : Optional[Any] =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else sep_token
lowerCamelCase__ : Dict =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else cls_token
lowerCamelCase__ : int =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else unk_token
lowerCamelCase__ : List[str] =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ : Any =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else mask_token
super().__init__(
errors=lowerCamelCase, bos_token=lowerCamelCase, eos_token=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, cls_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token=lowerCamelCase, add_prefix_space=lowerCamelCase, **lowerCamelCase, )
with open(lowerCamelCase, encoding='''utf-8''' ) as vocab_handle:
lowerCamelCase__ : Tuple =json.load(lowerCamelCase )
lowerCamelCase__ : Optional[int] ={v: k for k, v in self.encoder.items()}
lowerCamelCase__ : Tuple =errors # how to handle errors in decoding
lowerCamelCase__ : Optional[Any] =bytes_to_unicode()
lowerCamelCase__ : Optional[Any] ={v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase, encoding='''utf-8''' ) as merges_handle:
lowerCamelCase__ : Any =merges_handle.read().split('''\n''' )[1:-1]
lowerCamelCase__ : Optional[Any] =[tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase__ : List[str] =dict(zip(lowerCamelCase, range(len(lowerCamelCase ) ) ) )
lowerCamelCase__ : Optional[Any] ={}
lowerCamelCase__ : Optional[int] =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase__ : Optional[Any] =re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def snake_case ( self : str )-> List[Any]:
return len(self.encoder )
def snake_case ( self : Optional[Any] )-> Optional[Any]:
return dict(self.encoder, **self.added_tokens_encoder )
def snake_case ( self : Optional[int], lowerCamelCase : Union[str, Any] )-> Tuple:
if token in self.cache:
return self.cache[token]
lowerCamelCase__ : Tuple =tuple(lowerCamelCase )
lowerCamelCase__ : Optional[Any] =get_pairs(lowerCamelCase )
if not pairs:
return token
while True:
lowerCamelCase__ : List[Any] =min(lowerCamelCase, key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase, float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase__ : List[Any] =bigram
lowerCamelCase__ : Tuple =[]
lowerCamelCase__ : List[str] =0
while i < len(lowerCamelCase ):
try:
lowerCamelCase__ : str =word.index(lowerCamelCase, lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase__ : str =j
if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase__ : Optional[int] =tuple(lowerCamelCase )
lowerCamelCase__ : int =new_word
if len(lowerCamelCase ) == 1:
break
else:
lowerCamelCase__ : Dict =get_pairs(lowerCamelCase )
lowerCamelCase__ : Tuple =''' '''.join(lowerCamelCase )
lowerCamelCase__ : List[Any] =word
return word
def snake_case ( self : List[Any], lowerCamelCase : List[str] )-> List[Any]:
lowerCamelCase__ : Tuple =[]
for token in re.findall(self.pat, lowerCamelCase ):
lowerCamelCase__ : str =''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase ).split(''' ''' ) )
return bpe_tokens
def snake_case ( self : List[Any], lowerCamelCase : str )-> str:
return self.encoder.get(lowerCamelCase, self.encoder.get(self.unk_token ) )
def snake_case ( self : Optional[Any], lowerCamelCase : Any )-> Union[str, Any]:
return self.decoder.get(lowerCamelCase )
def snake_case ( self : Optional[Any], lowerCamelCase : int )-> List[Any]:
lowerCamelCase__ : Dict =''''''.join(lowerCamelCase )
lowerCamelCase__ : List[str] =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''', errors=self.errors )
return text
def snake_case ( self : List[Any], lowerCamelCase : str, lowerCamelCase : Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ : Dict =os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase__ : List[str] =os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCamelCase, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=lowerCamelCase, ensure_ascii=lowerCamelCase ) + '''\n''' )
lowerCamelCase__ : Union[str, Any] =0
with open(lowerCamelCase, '''w''', encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
lowerCamelCase__ : List[Any] =token_index
writer.write(''' '''.join(lowerCamelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def snake_case ( self : Optional[int], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None )-> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ : Dict =[self.cls_token_id]
lowerCamelCase__ : Any =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case ( self : Tuple, lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None, lowerCamelCase : bool = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase, token_ids_a=lowerCamelCase, already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def snake_case ( self : Dict, lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None )-> List[int]:
lowerCamelCase__ : int =[self.sep_token_id]
lowerCamelCase__ : Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case ( self : int, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any]=False, **lowerCamelCase : Tuple )-> List[Any]:
lowerCamelCase__ : Union[str, Any] =kwargs.pop('''add_prefix_space''', self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase ) > 0 and not text[0].isspace()):
lowerCamelCase__ : List[Any] =''' ''' + text
return (text, kwargs)
def snake_case ( self : List[str], lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding], lowerCamelCase : Optional[int] = None, lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[bool] = None, )-> dict:
lowerCamelCase__ : str =super()._pad(
encoded_inputs=lowerCamelCase, max_length=lowerCamelCase, padding_strategy=lowerCamelCase, pad_to_multiple_of=lowerCamelCase, return_attention_mask=lowerCamelCase, )
# Load from model defaults
if return_attention_mask is None:
lowerCamelCase__ : Dict ='''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCamelCase__ : Tuple =encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowerCamelCase__ : Optional[Any] =len(encoded_inputs['''global_attention_mask'''] ) != len(lowerCamelCase )
if needs_to_be_padded:
lowerCamelCase__ : Tuple =len(lowerCamelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCamelCase__ : Any =(
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowerCamelCase__ : Dict =[-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
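    # Hedged sketch (added; values are illustrative): with a tokenizer `tok` of
    # this class and default right padding,
    #     tok.pad({"input_ids": [[0, 9, 2]], "global_attention_mask": [[1, 0, 0]]},
    #             padding="max_length", max_length=5)
    # would extend global_attention_mask to [1, 0, 0, -1, -1], marking padded
    # positions as plain local attention.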
| 711 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
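# Added comment: this dummy-object pattern keeps top-level imports working when
# the optional `onnx` backend is missing; instantiating the class or calling its
# classmethods raises an informative error via requires_backends instead of an
# ImportError at import time.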
class __SCREAMING_SNAKE_CASE ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
_a = ['onnx']
def __init__( self : List[str], *lowerCamelCase : Union[str, Any], **lowerCamelCase : str )-> Optional[int]:
requires_backends(self, ['''onnx'''] )
@classmethod
def snake_case ( cls : List[str], *lowerCamelCase : Any, **lowerCamelCase : Union[str, Any] )-> Optional[int]:
requires_backends(cls, ['''onnx'''] )
@classmethod
def snake_case ( cls : Union[str, Any], *lowerCamelCase : Tuple, **lowerCamelCase : Tuple )-> Optional[int]:
requires_backends(cls, ['''onnx'''] )
| 625 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def snake_case ( *lowerCamelCase : Optional[Any], **lowerCamelCase : Any )-> List[Any]:
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
_a = MODEL_FOR_OBJECT_DETECTION_MAPPING
def snake_case ( self : Tuple, lowerCamelCase : Any, lowerCamelCase : Dict, lowerCamelCase : str )-> str:
lowerCamelCase__ : Dict =ObjectDetectionPipeline(model=lowerCamelCase, image_processor=lowerCamelCase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def snake_case ( self : List[str], lowerCamelCase : Optional[Any], lowerCamelCase : Tuple )-> List[str]:
lowerCamelCase__ : List[Any] =object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''', threshold=0.0 )
self.assertGreater(len(lowerCamelCase ), 0 )
for detected_object in outputs:
self.assertEqual(
lowerCamelCase, {
'''score''': ANY(lowerCamelCase ),
'''label''': ANY(lowerCamelCase ),
'''box''': {'''xmin''': ANY(lowerCamelCase ), '''ymin''': ANY(lowerCamelCase ), '''xmax''': ANY(lowerCamelCase ), '''ymax''': ANY(lowerCamelCase )},
}, )
import datasets
lowerCamelCase__ : int =datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''', '''image''', split='''test''' )
lowerCamelCase__ : Optional[Any] =[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
]
lowerCamelCase__ : Dict =object_detector(lowerCamelCase, threshold=0.0 )
self.assertEqual(len(lowerCamelCase ), len(lowerCamelCase ) )
for outputs in batch_outputs:
self.assertGreater(len(lowerCamelCase ), 0 )
for detected_object in outputs:
self.assertEqual(
lowerCamelCase, {
'''score''': ANY(lowerCamelCase ),
'''label''': ANY(lowerCamelCase ),
'''box''': {'''xmin''': ANY(lowerCamelCase ), '''ymin''': ANY(lowerCamelCase ), '''xmax''': ANY(lowerCamelCase ), '''ymax''': ANY(lowerCamelCase )},
}, )
@require_tf
@unittest.skip('''Object detection not implemented in TF''' )
def snake_case ( self : List[str] )-> Dict:
pass
@require_torch
def snake_case ( self : int )-> Union[str, Any]:
lowerCamelCase__ : int ='''hf-internal-testing/tiny-detr-mobilenetsv3'''
lowerCamelCase__ : List[Any] =AutoModelForObjectDetection.from_pretrained(lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =AutoFeatureExtractor.from_pretrained(lowerCamelCase )
lowerCamelCase__ : Optional[Any] =ObjectDetectionPipeline(model=lowerCamelCase, feature_extractor=lowerCamelCase )
lowerCamelCase__ : str =object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''', threshold=0.0 )
self.assertEqual(
nested_simplify(lowerCamelCase, decimals=4 ), [
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
], )
lowerCamelCase__ : Union[str, Any] =object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
], threshold=0.0, )
self.assertEqual(
nested_simplify(lowerCamelCase, decimals=4 ), [
[
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
[
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3_376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
], )
@require_torch
@slow
def snake_case ( self : Dict )-> Union[str, Any]:
lowerCamelCase__ : List[Any] ='''facebook/detr-resnet-50'''
lowerCamelCase__ : Dict =AutoModelForObjectDetection.from_pretrained(lowerCamelCase )
lowerCamelCase__ : Optional[Any] =AutoFeatureExtractor.from_pretrained(lowerCamelCase )
lowerCamelCase__ : Any =ObjectDetectionPipeline(model=lowerCamelCase, feature_extractor=lowerCamelCase )
lowerCamelCase__ : Tuple =object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(lowerCamelCase, decimals=4 ), [
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
], )
lowerCamelCase__ : Tuple =object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(lowerCamelCase, decimals=4 ), [
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
], )
@require_torch
@slow
def snake_case ( self : Optional[Any] )-> List[Any]:
lowerCamelCase__ : List[Any] ='''facebook/detr-resnet-50'''
lowerCamelCase__ : Union[str, Any] =pipeline('''object-detection''', model=lowerCamelCase )
lowerCamelCase__ : Tuple =object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(lowerCamelCase, decimals=4 ), [
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
], )
lowerCamelCase__ : Dict =object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(lowerCamelCase, decimals=4 ), [
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9_982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
], )
@require_torch
@slow
def snake_case ( self : Any )-> List[Any]:
lowerCamelCase__ : Tuple =0.9_985
lowerCamelCase__ : int ='''facebook/detr-resnet-50'''
lowerCamelCase__ : Optional[int] =pipeline('''object-detection''', model=lowerCamelCase )
lowerCamelCase__ : Any =object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''', threshold=lowerCamelCase )
self.assertEqual(
nested_simplify(lowerCamelCase, decimals=4 ), [
{'''score''': 0.9_988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
], )
@require_torch
@require_pytesseract
@slow
def snake_case ( self : Union[str, Any] )-> Union[str, Any]:
lowerCamelCase__ : Union[str, Any] ='''Narsil/layoutlmv3-finetuned-funsd'''
lowerCamelCase__ : Dict =0.9_993
lowerCamelCase__ : List[str] =pipeline('''object-detection''', model=lowerCamelCase, threshold=lowerCamelCase )
lowerCamelCase__ : List[Any] =object_detector(
'''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' )
self.assertEqual(
nested_simplify(lowerCamelCase, decimals=4 ), [
{'''score''': 0.9_993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
{'''score''': 0.9_993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
], )
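# Hedged usage sketch (added; mirrors the slow tests above, output abbreviated):
#
#     from transformers import pipeline
#     detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#     detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
#     # -> [{"score": 0.9982, "label": "remote", "box": {"xmin": 40, ...}}, ...]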
| 712 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def snake_case__ ( __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : int ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =x
lowerCamelCase__ : Any =y
for step in range(__lowerCamelCase ): # noqa: B007
lowerCamelCase__ : List[Any] =a * a - b * b + x
lowerCamelCase__ : Optional[int] =2 * a * b + y
lowerCamelCase__ : Union[str, Any] =a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
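# Illustrative check (added): with max_step = 50, get_distance(0, 0, 50)
# returns 1.0 because the origin never diverges, while get_distance(1, 1, 50)
# escapes on the first step and returns 0.0 -- so a distance of exactly 1 marks
# points treated as inside the Mandelbrot set by the coloring functions below.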
def snake_case__ ( __lowerCamelCase : float ):
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def snake_case__ ( __lowerCamelCase : float ):
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(__lowerCamelCase , 1 , 1 ) )
def snake_case__ ( __lowerCamelCase : int = 800 , __lowerCamelCase : int = 600 , __lowerCamelCase : float = -0.6 , __lowerCamelCase : float = 0 , __lowerCamelCase : float = 3.2 , __lowerCamelCase : int = 50 , __lowerCamelCase : bool = True , ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =Image.new('''RGB''' , (image_width, image_height) )
lowerCamelCase__ : Optional[int] =img.load()
# loop through the image-coordinates
for image_x in range(__lowerCamelCase ):
for image_y in range(__lowerCamelCase ):
# determine the figure-coordinates based on the image-coordinates
lowerCamelCase__ : Optional[Any] =figure_width / image_width * image_height
lowerCamelCase__ : Dict =figure_center_x + (image_x / image_width - 0.5) * figure_width
lowerCamelCase__ : Optional[int] =figure_center_y + (image_y / image_height - 0.5) * figure_height
lowerCamelCase__ : Any =get_distance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
lowerCamelCase__ : int =get_color_coded_rgb(__lowerCamelCase )
else:
lowerCamelCase__ : Optional[int] =get_black_and_white_rgb(__lowerCamelCase )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
_lowercase : Optional[Any] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 625 | 0 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : int ):
"""simple docstring"""
assert (
isinstance(__lowerCamelCase , __lowerCamelCase ) and number_of_steps > 0
), f'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
if number_of_steps == 1:
return 1
    lowerCamelCase__ , lowerCamelCase__ : int =1, 1
for _ in range(number_of_steps - 1 ):
        lowerCamelCase__ , lowerCamelCase__ : List[Any] =current + previous, current
return current
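# Worked example (added): number_of_steps = 3 returns 3, matching the three
# ways to climb (1+1+1, 1+2, 2+1); the loop is the iterative Fibonacci
# recurrence ways(n) = ways(n - 1) + ways(n - 2).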
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def snake_case__ ( __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ : str =VideoMAEConfig()
set_architecture_configs(__lowerCamelCase , __lowerCamelCase )
if "finetuned" not in model_name:
lowerCamelCase__ : int =False
if "finetuned" in model_name:
lowerCamelCase__ : str ='''huggingface/label-files'''
if "kinetics" in model_name:
lowerCamelCase__ : List[Any] =400
lowerCamelCase__ : Optional[int] ='''kinetics400-id2label.json'''
elif "ssv2" in model_name:
lowerCamelCase__ : Tuple =174
lowerCamelCase__ : Optional[Any] ='''something-something-v2-id2label.json'''
else:
raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' )
lowerCamelCase__ : Optional[int] =json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
lowerCamelCase__ : List[Any] ={int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : Dict =idalabel
lowerCamelCase__ : Any ={v: k for k, v in idalabel.items()}
return config
def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
if "small" in model_name:
lowerCamelCase__ : Optional[Any] =384
lowerCamelCase__ : List[Any] =1536
lowerCamelCase__ : int =12
lowerCamelCase__ : Dict =16
lowerCamelCase__ : List[Any] =12
lowerCamelCase__ : Optional[Any] =3
lowerCamelCase__ : Union[str, Any] =192
lowerCamelCase__ : str =768
elif "large" in model_name:
lowerCamelCase__ : Union[str, Any] =1024
lowerCamelCase__ : str =4096
lowerCamelCase__ : int =24
lowerCamelCase__ : Dict =16
lowerCamelCase__ : Union[str, Any] =12
lowerCamelCase__ : List[Any] =8
lowerCamelCase__ : int =512
lowerCamelCase__ : Optional[Any] =2048
elif "huge" in model_name:
lowerCamelCase__ : Optional[int] =1280
lowerCamelCase__ : Optional[int] =5120
lowerCamelCase__ : List[Any] =32
lowerCamelCase__ : List[Any] =16
lowerCamelCase__ : Optional[Any] =12
lowerCamelCase__ : Dict =8
lowerCamelCase__ : List[Any] =640
lowerCamelCase__ : Any =2560
elif "base" not in model_name:
raise ValueError('''Model name should include either "small", "base", "large", or "huge"''' )
def snake_case__ ( __lowerCamelCase : Any ):
"""simple docstring"""
if "encoder." in name:
lowerCamelCase__ : Optional[int] =name.replace('''encoder.''' , '''''' )
if "cls_token" in name:
lowerCamelCase__ : List[Any] =name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' )
if "decoder_pos_embed" in name:
lowerCamelCase__ : Tuple =name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
lowerCamelCase__ : Any =name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
lowerCamelCase__ : Optional[Any] =name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCamelCase__ : List[Any] =name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' )
if "decoder.blocks" in name:
lowerCamelCase__ : Tuple =name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' )
if "blocks" in name:
lowerCamelCase__ : Dict =name.replace('''blocks''' , '''videomae.encoder.layer''' )
if "attn.proj" in name:
lowerCamelCase__ : Union[str, Any] =name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "bias" not in name:
lowerCamelCase__ : List[str] =name.replace('''attn''' , '''attention.self''' )
if "attn" in name:
lowerCamelCase__ : Union[str, Any] =name.replace('''attn''' , '''attention.attention''' )
if "norm1" in name:
lowerCamelCase__ : Tuple =name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowerCamelCase__ : Optional[int] =name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowerCamelCase__ : List[Any] =name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowerCamelCase__ : int =name.replace('''mlp.fc2''' , '''output.dense''' )
if "decoder_embed" in name:
lowerCamelCase__ : Any =name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
if "decoder_norm" in name:
lowerCamelCase__ : Optional[Any] =name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
if "decoder_pred" in name:
lowerCamelCase__ : Any =name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
lowerCamelCase__ : str =name.replace('''norm.weight''' , '''videomae.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
lowerCamelCase__ : Optional[int] =name.replace('''norm.bias''' , '''videomae.layernorm.bias''' )
if "head" in name and "decoder" not in name:
lowerCamelCase__ : List[str] =name.replace('''head''' , '''classifier''' )
return name
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowerCamelCase__ : Dict =orig_state_dict.pop(__lowerCamelCase )
if key.startswith('''encoder.''' ):
lowerCamelCase__ : Optional[int] =key.replace('''encoder.''' , '''''' )
if "qkv" in key:
lowerCamelCase__ : Any =key.split('''.''' )
if key.startswith('''decoder.blocks''' ):
lowerCamelCase__ : Tuple =config.decoder_hidden_size
lowerCamelCase__ : str =int(key_split[2] )
lowerCamelCase__ : Any ='''decoder.decoder_layers.'''
if "weight" in key:
lowerCamelCase__ : List[Any] =val[:dim, :]
lowerCamelCase__ : Any =val[dim : dim * 2, :]
lowerCamelCase__ : Dict =val[-dim:, :]
else:
lowerCamelCase__ : Optional[Any] =config.hidden_size
lowerCamelCase__ : Optional[Any] =int(key_split[1] )
lowerCamelCase__ : str ='''videomae.encoder.layer.'''
if "weight" in key:
lowerCamelCase__ : int =val[:dim, :]
lowerCamelCase__ : Tuple =val[dim : dim * 2, :]
lowerCamelCase__ : List[Any] =val[-dim:, :]
else:
lowerCamelCase__ : int =val
return orig_state_dict
def snake_case__ ( ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
lowerCamelCase__ : Optional[Any] =np.load(__lowerCamelCase )
return list(__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : str =get_videomae_config(__lowerCamelCase )
if "finetuned" in model_name:
lowerCamelCase__ : Tuple =VideoMAEForVideoClassification(__lowerCamelCase )
else:
lowerCamelCase__ : int =VideoMAEForPreTraining(__lowerCamelCase )
# download original checkpoint, hosted on Google Drive
lowerCamelCase__ : Union[str, Any] ='''pytorch_model.bin'''
gdown.cached_download(__lowerCamelCase , __lowerCamelCase , quiet=__lowerCamelCase )
lowerCamelCase__ : Optional[Any] =torch.load(__lowerCamelCase , map_location='''cpu''' )
if "model" in files:
lowerCamelCase__ : Dict =files['''model''']
else:
lowerCamelCase__ : str =files['''module''']
lowerCamelCase__ : Optional[Any] =convert_state_dict(__lowerCamelCase , __lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
model.eval()
# verify model on basic input
lowerCamelCase__ : Dict =VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
lowerCamelCase__ : int =prepare_video()
lowerCamelCase__ : Tuple =image_processor(__lowerCamelCase , return_tensors='''pt''' )
if "finetuned" not in model_name:
lowerCamelCase__ : Tuple =hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
lowerCamelCase__ : Union[str, Any] =torch.load(__lowerCamelCase )
lowerCamelCase__ : int =model(**__lowerCamelCase )
lowerCamelCase__ : Dict =outputs.logits
lowerCamelCase__ : List[str] =[
'''videomae-small-finetuned-kinetics''',
'''videomae-small-finetuned-ssv2''',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'''videomae-base-short''',
'''videomae-base-short-finetuned-kinetics''',
'''videomae-base''',
'''videomae-base-finetuned-kinetics''',
'''videomae-large''',
'''videomae-large-finetuned-kinetics''',
'''videomae-huge-finetuned-kinetics''',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'''videomae-base-short-ssv2''',
'''videomae-base-short-finetuned-ssv2''',
'''videomae-base-ssv2''',
'''videomae-base-finetuned-ssv2''',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
lowerCamelCase__ : Union[str, Any] =torch.Size([1, 400] )
lowerCamelCase__ : str =torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
elif model_name == "videomae-small-finetuned-ssv2":
lowerCamelCase__ : int =torch.Size([1, 174] )
lowerCamelCase__ : Dict =torch.tensor([0.26_71, -0.46_89, -0.82_35] )
elif model_name == "videomae-base":
lowerCamelCase__ : List[str] =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : Dict =torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
elif model_name == "videomae-base-short":
lowerCamelCase__ : List[Any] =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : List[str] =torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
# we verified the loss both for normalized and unnormalized targets for this one
lowerCamelCase__ : str =torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
elif model_name == "videomae-large":
lowerCamelCase__ : Union[str, Any] =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : List[Any] =torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
elif model_name == "videomae-large-finetuned-kinetics":
lowerCamelCase__ : Any =torch.Size([1, 400] )
lowerCamelCase__ : str =torch.tensor([0.07_71, 0.00_11, -0.36_25] )
elif model_name == "videomae-huge-finetuned-kinetics":
lowerCamelCase__ : Any =torch.Size([1, 400] )
lowerCamelCase__ : Optional[int] =torch.tensor([0.24_33, 0.16_32, -0.48_94] )
elif model_name == "videomae-base-short-finetuned-kinetics":
lowerCamelCase__ : List[str] =torch.Size([1, 400] )
lowerCamelCase__ : Dict =torch.tensor([0.65_88, 0.09_90, -0.24_93] )
elif model_name == "videomae-base-finetuned-kinetics":
lowerCamelCase__ : str =torch.Size([1, 400] )
lowerCamelCase__ : Any =torch.tensor([0.36_69, -0.06_88, -0.24_21] )
elif model_name == "videomae-base-short-ssv2":
lowerCamelCase__ : Tuple =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : Dict =torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
lowerCamelCase__ : Optional[int] =torch.Size([1, 174] )
lowerCamelCase__ : Any =torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
elif model_name == "videomae-base-ssv2":
lowerCamelCase__ : Dict =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : str =torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
elif model_name == "videomae-base-finetuned-ssv2":
lowerCamelCase__ : str =torch.Size([1, 174] )
lowerCamelCase__ : int =torch.tensor([0.19_61, -0.83_37, -0.63_89] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1e-4 )
else:
print('''Logits:''' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 )
print('''Logits ok!''' )
# verify loss, if applicable
if model_name == "videomae-base-short":
lowerCamelCase__ : str =outputs.loss
assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-4 )
print('''Loss ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
if push_to_hub:
print('''Pushing to the hub...''' )
model.push_to_hub(__lowerCamelCase , organization='''nielsr''' )
if __name__ == "__main__":
_lowercase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
type=str,
help=(
"URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
" download link."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="/Users/nielsrogge/Documents/VideoMAE/Test",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowercase : Union[str, Any] = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 625 | 0 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def snake_case__ ( __lowerCamelCase : int ):
"""simple docstring"""
return 1.0 / (1.0 + np.exp(-_outputs ))
def snake_case__ ( __lowerCamelCase : List[str] ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =np.max(_outputs , axis=-1 , keepdims=__lowerCamelCase )
lowerCamelCase__ : Any =np.exp(_outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__lowerCamelCase )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = 'sigmoid'
_a = 'softmax'
_a = 'none'
@add_end_docstrings(
lowerCAmelCase_ , r'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ' , )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = False
_a = ClassificationFunction.NONE
def __init__( self : Optional[int], **lowerCamelCase : str )-> List[str]:
super().__init__(**lowerCamelCase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def snake_case ( self : Any, lowerCamelCase : Tuple=None, lowerCamelCase : Optional[Any]=None, lowerCamelCase : int="", **lowerCamelCase : Union[str, Any] )-> Any:
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
lowerCamelCase__ : Union[str, Any] =tokenizer_kwargs
lowerCamelCase__ : Optional[int] ={}
if hasattr(self.model.config, '''return_all_scores''' ) and return_all_scores is None:
lowerCamelCase__ : Dict =self.model.config.return_all_scores
if isinstance(lowerCamelCase, lowerCamelCase ) or top_k is None:
lowerCamelCase__ : str =top_k
lowerCamelCase__ : str =False
elif return_all_scores is not None:
warnings.warn(
'''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'''
''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''', lowerCamelCase, )
if return_all_scores:
lowerCamelCase__ : int =None
else:
lowerCamelCase__ : Optional[int] =1
if isinstance(lowerCamelCase, lowerCamelCase ):
lowerCamelCase__ : Dict =ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
lowerCamelCase__ : List[str] =function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self : int, *lowerCamelCase : Dict, **lowerCamelCase : Tuple )-> Union[str, Any]:
lowerCamelCase__ : str =super().__call__(*lowerCamelCase, **lowerCamelCase )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
lowerCamelCase__ : Union[str, Any] ='''top_k''' not in kwargs
if isinstance(args[0], lowerCamelCase ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def snake_case ( self : Union[str, Any], lowerCamelCase : str, **lowerCamelCase : Dict )-> Dict[str, GenericTensor]:
lowerCamelCase__ : Union[str, Any] =self.framework
if isinstance(lowerCamelCase, lowerCamelCase ):
return self.tokenizer(**lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase )
elif isinstance(lowerCamelCase, lowerCamelCase ) and len(lowerCamelCase ) == 1 and isinstance(inputs[0], lowerCamelCase ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0], text_pair=inputs[0][1], return_tensors=lowerCamelCase, **lowerCamelCase )
elif isinstance(lowerCamelCase, lowerCamelCase ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'''
''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' )
return self.tokenizer(lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase )
def snake_case ( self : Optional[Any], lowerCamelCase : Tuple )-> int:
return self.model(**lowerCamelCase )
def snake_case ( self : Optional[Any], lowerCamelCase : Optional[Any], lowerCamelCase : Optional[Any]=None, lowerCamelCase : Optional[Any]=1, lowerCamelCase : List[Any]=True )-> List[str]:
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
lowerCamelCase__ : List[str] =ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
lowerCamelCase__ : Optional[int] =ClassificationFunction.SOFTMAX
elif hasattr(self.model.config, '''function_to_apply''' ) and function_to_apply is None:
lowerCamelCase__ : Tuple =self.model.config.function_to_apply
else:
lowerCamelCase__ : str =ClassificationFunction.NONE
lowerCamelCase__ : int =model_outputs['''logits'''][0]
lowerCamelCase__ : Tuple =outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
lowerCamelCase__ : List[str] =sigmoid(lowerCamelCase )
elif function_to_apply == ClassificationFunction.SOFTMAX:
lowerCamelCase__ : int =softmax(lowerCamelCase )
elif function_to_apply == ClassificationFunction.NONE:
lowerCamelCase__ : List[str] =outputs
else:
raise ValueError(F'''Unrecognized `function_to_apply` argument: {function_to_apply}''' )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
lowerCamelCase__ : Tuple =[
{'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(lowerCamelCase )
]
if not _legacy:
dict_scores.sort(key=lambda lowerCamelCase : x["score"], reverse=lowerCamelCase )
if top_k is not None:
lowerCamelCase__ : Dict =dict_scores[:top_k]
return dict_scores
| 714 |
"""simple docstring"""
_lowercase : str = 0 # The first color of the flag.
_lowercase : Dict = 1 # The second color of the flag.
_lowercase : Tuple = 2 # The third color of the flag.
_lowercase : Optional[int] = (red, white, blue)
def snake_case__ ( __lowerCamelCase : list ):
"""simple docstring"""
if not sequence:
return []
if len(__lowerCamelCase ) == 1:
return list(__lowerCamelCase )
lowerCamelCase__ : List[Any] =0
lowerCamelCase__ : Dict =len(__lowerCamelCase ) - 1
lowerCamelCase__ : Tuple =0
while mid <= high:
if sequence[mid] == colors[0]:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =sequence[high], sequence[mid]
high -= 1
else:
            lowerCamelCase__ : Dict =f'''The elements inside the sequence must contain only {colors} values'''
raise ValueError(__lowerCamelCase )
return sequence
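# Example: dutch_national_flag_sort([2, 0, 1, 0, 2]) returns [0, 0, 1, 2, 2].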
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : Optional[Any] = input("Enter numbers separated by commas:\n").strip()
_lowercase : int = [int(item.strip()) for item in user_input.split(",")]
print(f'{dutch_national_flag_sort(unsorted)}')
| 625 | 0 |
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : List[str], lowerCamelCase : Callable, lowerCamelCase : Optional[Features] = None, lowerCamelCase : str = None, lowerCamelCase : bool = False, lowerCamelCase : bool = False, lowerCamelCase : Optional[dict] = None, lowerCamelCase : Optional[int] = None, **lowerCamelCase : Optional[int], )-> Optional[int]:
super().__init__(
features=lowerCamelCase, cache_dir=lowerCamelCase, keep_in_memory=lowerCamelCase, streaming=lowerCamelCase, num_proc=lowerCamelCase, **lowerCamelCase, )
lowerCamelCase__ : Union[str, Any] =Generator(
cache_dir=lowerCamelCase, features=lowerCamelCase, generator=lowerCamelCase, gen_kwargs=lowerCamelCase, **lowerCamelCase, )
def snake_case ( self : Union[str, Any] )-> Optional[int]:
# Build iterable dataset
if self.streaming:
lowerCamelCase__ : Any =self.builder.as_streaming_dataset(split='''train''' )
# Build regular (map-style) dataset
else:
lowerCamelCase__ : List[str] =None
lowerCamelCase__ : List[str] =None
lowerCamelCase__ : Dict =None
lowerCamelCase__ : Optional[Any] =None
self.builder.download_and_prepare(
download_config=lowerCamelCase, download_mode=lowerCamelCase, verification_mode=lowerCamelCase, base_path=lowerCamelCase, num_proc=self.num_proc, )
lowerCamelCase__ : Dict =self.builder.as_dataset(
split='''train''', verification_mode=lowerCamelCase, in_memory=self.keep_in_memory )
return dataset
| 715 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_a = StableUnCLIPImgaImgPipeline
_a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_a = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_a = frozenset([] )
def snake_case ( self : List[str] )-> str:
lowerCamelCase__ : Dict =32
lowerCamelCase__ : Optional[Any] =embedder_hidden_size
# image encoding components
lowerCamelCase__ : Dict =CLIPImageProcessor(crop_size=32, size=32 )
torch.manual_seed(0 )
lowerCamelCase__ : List[Any] =CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase, projection_dim=lowerCamelCase, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )
# regular denoising components
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] =StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
lowerCamelCase__ : Dict =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
lowerCamelCase__ : Tuple =CLIPTextModel(
CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=lowerCamelCase, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
torch.manual_seed(0 )
lowerCamelCase__ : Dict =UNetaDConditionModel(
sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='''projection''', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=lowerCamelCase, layers_per_block=1, upcast_attention=lowerCamelCase, use_linear_projection=lowerCamelCase, )
torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] =DDIMScheduler(
beta_schedule='''scaled_linear''', beta_start=0.00_085, beta_end=0.012, prediction_type='''v_prediction''', set_alpha_to_one=lowerCamelCase, steps_offset=1, )
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] =AutoencoderKL()
lowerCamelCase__ : int ={
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def snake_case ( self : str, lowerCamelCase : Dict, lowerCamelCase : Any=0, lowerCamelCase : str=True )-> List[str]:
if str(lowerCamelCase ).startswith('''mps''' ):
lowerCamelCase__ : List[Any] =torch.manual_seed(lowerCamelCase )
else:
lowerCamelCase__ : Any =torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
lowerCamelCase__ : Dict =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
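        # Optionally convert the random tensor into a PIL image, the input format a user would typically pass to the pipeline.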
if pil_image:
lowerCamelCase__ : int =input_image * 0.5 + 0.5
lowerCamelCase__ : Dict =input_image.clamp(0, 1 )
lowerCamelCase__ : List[str] =input_image.cpu().permute(0, 2, 3, 1 ).float().numpy()
lowerCamelCase__ : Dict =DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def snake_case ( self : List[str] )-> Optional[Any]:
lowerCamelCase__ : Dict ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : str =self.get_dummy_components()
lowerCamelCase__ : int =StableUnCLIPImgaImgPipeline(**lowerCamelCase )
lowerCamelCase__ : Any =sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowerCamelCase__ : Dict =self.get_dummy_inputs(lowerCamelCase )
inputs.update({'''image_embeds''': None} )
lowerCamelCase__ : Any =sd_pipe(**lowerCamelCase ).images
lowerCamelCase__ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase__ : Union[str, Any] =np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self : int )-> Tuple:
lowerCamelCase__ : Tuple =torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
def snake_case ( self : int )-> Optional[Any]:
lowerCamelCase__ : List[Any] =torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
def snake_case ( self : List[str] )-> List[str]:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : List[Any] )-> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : Optional[int] )-> int:
lowerCamelCase__ : Tuple =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
lowerCamelCase__ : Optional[int] =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
lowerCamelCase__ : Optional[Any] =StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-l-img2img''', torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase__ : int =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase__ : Any =pipe(lowerCamelCase, '''anime turtle''', generator=lowerCamelCase, output_type='''np''' )
lowerCamelCase__ : List[Any] =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
def snake_case ( self : Optional[int] )-> Tuple:
lowerCamelCase__ : Any =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
lowerCamelCase__ : str =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
lowerCamelCase__ : Optional[int] =StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase__ : str =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase__ : Tuple =pipe(lowerCamelCase, '''anime turtle''', generator=lowerCamelCase, output_type='''np''' )
lowerCamelCase__ : Tuple =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
def snake_case ( self : Optional[int] )-> List[str]:
lowerCamelCase__ : int =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase__ : Any =StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
lowerCamelCase__ : Optional[Any] =pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase__ : List[Any] =pipe(
lowerCamelCase, '''anime turtle''', num_inference_steps=2, output_type='''np''', )
lowerCamelCase__ : Optional[int] =torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 625 | 0 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
_a = [r'h\.\d+\.attn\.bias', r'h\.\d+\.attn\.masked_bias']
@register_to_config
def __init__( self : Optional[int], lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : Optional[int] = None, lowerCamelCase : int = 5_0257, lowerCamelCase : int = 1024, lowerCamelCase : int = 768, lowerCamelCase : int = 12, lowerCamelCase : int = 12, lowerCamelCase : Optional[int] = None, lowerCamelCase : str = "gelu_new", lowerCamelCase : float = 0.1, lowerCamelCase : float = 0.1, lowerCamelCase : float = 0.1, lowerCamelCase : float = 1E-5, lowerCamelCase : float = 0.02, lowerCamelCase : bool = True, lowerCamelCase : bool = True, lowerCamelCase : bool = False, lowerCamelCase : bool = False, )-> Optional[Any]:
super().__init__()
lowerCamelCase__ : List[Any] =prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
                F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'''
F''' `n_embd`: {n_embd} are not equal.''' )
lowerCamelCase__ : List[Any] =prefix_inner_dim
lowerCamelCase__ : Tuple =prefix_hidden_dim
lowerCamelCase__ : Any =(
nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
lowerCamelCase__ : List[str] =(
nn.Linear(self.prefix_hidden_dim, lowerCamelCase ) if self.prefix_hidden_dim is not None else nn.Identity()
)
lowerCamelCase__ : int =GPTaConfig(
vocab_size=lowerCamelCase, n_positions=lowerCamelCase, n_embd=lowerCamelCase, n_layer=lowerCamelCase, n_head=lowerCamelCase, n_inner=lowerCamelCase, activation_function=lowerCamelCase, resid_pdrop=lowerCamelCase, embd_pdrop=lowerCamelCase, attn_pdrop=lowerCamelCase, layer_norm_epsilon=lowerCamelCase, initializer_range=lowerCamelCase, scale_attn_weights=lowerCamelCase, use_cache=lowerCamelCase, scale_attn_by_inverse_layer_idx=lowerCamelCase, reorder_and_upcast_attn=lowerCamelCase, )
lowerCamelCase__ : Tuple =GPTaLMHeadModel(lowerCamelCase )
def snake_case ( self : List[str], lowerCamelCase : torch.Tensor, lowerCamelCase : torch.Tensor, lowerCamelCase : Optional[torch.Tensor] = None, lowerCamelCase : Optional[torch.Tensor] = None, )-> int:
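        # Embed the caption tokens, project the image-feature prefix into the language model's embedding space, and decode the concatenated sequence.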
lowerCamelCase__ : int =self.transformer.transformer.wte(lowerCamelCase )
lowerCamelCase__ : Dict =self.encode_prefix(lowerCamelCase )
lowerCamelCase__ : Tuple =self.decode_prefix(lowerCamelCase )
lowerCamelCase__ : List[Any] =torch.cat((prefix_embeds, embedding_text), dim=1 )
if labels is not None:
lowerCamelCase__ : Dict =self.get_dummy_token(input_ids.shape[0], input_ids.device )
lowerCamelCase__ : List[str] =torch.cat((dummy_token, input_ids), dim=1 )
lowerCamelCase__ : Union[str, Any] =self.transformer(inputs_embeds=lowerCamelCase, labels=lowerCamelCase, attention_mask=lowerCamelCase )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def snake_case ( self : List[str], lowerCamelCase : int, lowerCamelCase : torch.device )-> torch.Tensor:
return torch.zeros(lowerCamelCase, self.prefix_length, dtype=torch.intaa, device=lowerCamelCase )
def snake_case ( self : str, lowerCamelCase : int )-> int:
return self.encode_prefix(lowerCamelCase )
@torch.no_grad()
def snake_case ( self : Dict, lowerCamelCase : List[Any], lowerCamelCase : Any, lowerCamelCase : Optional[Any] )-> Tuple:
lowerCamelCase__ : Union[str, Any] =torch.split(lowerCamelCase, 1, dim=0 )
lowerCamelCase__ : List[Any] =[]
lowerCamelCase__ : Optional[int] =[]
for feature in features:
lowerCamelCase__ : List[str] =self.decode_prefix(feature.to(lowerCamelCase ) ) # back to the clip feature
# Only support beam search for now
            lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =self.generate_beam(
input_embeds=lowerCamelCase, device=lowerCamelCase, eos_token_id=lowerCamelCase )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
lowerCamelCase__ : Union[str, Any] =torch.stack(lowerCamelCase )
lowerCamelCase__ : Any =torch.stack(lowerCamelCase )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def snake_case ( self : Dict, lowerCamelCase : Dict=None, lowerCamelCase : int=None, lowerCamelCase : Any=None, lowerCamelCase : int = 5, lowerCamelCase : int = 67, lowerCamelCase : float = 1.0, lowerCamelCase : Optional[int] = None, )-> str:
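        # Beam search decoding: keep the highest-scoring partial sequences at each step, extending every beam by one token until all beams emit EOS or the length limit is reached.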
lowerCamelCase__ : str =eos_token_id
lowerCamelCase__ : Any =None
lowerCamelCase__ : int =None
lowerCamelCase__ : List[str] =torch.ones(lowerCamelCase, device=lowerCamelCase, dtype=torch.int )
lowerCamelCase__ : Any =torch.zeros(lowerCamelCase, device=lowerCamelCase, dtype=torch.bool )
if input_embeds is not None:
lowerCamelCase__ : int =input_embeds
else:
lowerCamelCase__ : Optional[Any] =self.transformer.transformer.wte(lowerCamelCase )
for i in range(lowerCamelCase ):
lowerCamelCase__ : Any =self.transformer(inputs_embeds=lowerCamelCase )
lowerCamelCase__ : List[Any] =outputs.logits
lowerCamelCase__ : Tuple =logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowerCamelCase__ : int =logits.softmax(-1 ).log()
if scores is None:
                lowerCamelCase__ , lowerCamelCase__ : List[str] =logits.topk(lowerCamelCase, -1 )
lowerCamelCase__ : Union[str, Any] =generated.expand(lowerCamelCase, *generated.shape[1:] )
                lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =next_tokens.permute(1, 0 ), scores.squeeze(0 )
if tokens is None:
lowerCamelCase__ : Tuple =next_tokens
else:
lowerCamelCase__ : Optional[Any] =tokens.expand(lowerCamelCase, *tokens.shape[1:] )
lowerCamelCase__ : Tuple =torch.cat((tokens, next_tokens), dim=1 )
else:
lowerCamelCase__ : Union[str, Any] =-float(np.inf )
lowerCamelCase__ : Dict =0
lowerCamelCase__ : Optional[Any] =scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowerCamelCase__ : Tuple =scores_sum / seq_lengths[:, None]
                lowerCamelCase__ , lowerCamelCase__ : Dict =scores_sum_average.view(-1 ).topk(lowerCamelCase, -1 )
lowerCamelCase__ : Optional[int] =next_tokens // scores_sum.shape[1]
lowerCamelCase__ : Tuple =seq_lengths[next_tokens_source]
lowerCamelCase__ : int =next_tokens % scores_sum.shape[1]
lowerCamelCase__ : Optional[int] =next_tokens.unsqueeze(1 )
lowerCamelCase__ : List[str] =tokens[next_tokens_source]
lowerCamelCase__ : Optional[Any] =torch.cat((tokens, next_tokens), dim=1 )
lowerCamelCase__ : Optional[Any] =generated[next_tokens_source]
lowerCamelCase__ : Any =scores_sum_average * seq_lengths
lowerCamelCase__ : List[str] =is_stopped[next_tokens_source]
lowerCamelCase__ : List[str] =self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0], 1, -1 )
lowerCamelCase__ : List[str] =torch.cat((generated, next_token_embed), dim=1 )
lowerCamelCase__ : Dict =is_stopped + next_tokens.eq(lowerCamelCase ).squeeze()
if is_stopped.all():
break
lowerCamelCase__ : Union[str, Any] =scores / seq_lengths
lowerCamelCase__ : Dict =scores.argsort(descending=lowerCamelCase )
# tokens tensors are already padded to max_seq_length
lowerCamelCase__ : int =[tokens[i] for i in order]
lowerCamelCase__ : Any =torch.stack(lowerCamelCase, dim=0 )
lowerCamelCase__ : Optional[Any] =torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 716 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : int = 4000000 ):
"""simple docstring"""
lowerCamelCase__ : Dict =[]
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(__lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =b, a + b
return sum(__lowerCamelCase )
if __name__ == "__main__":
print(f'{solution() = }')
| 625 | 0 |
"""simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def snake_case__ ( __lowerCamelCase : Any ):
"""simple docstring"""
lowerCamelCase__ : Tuple =int(__lowerCamelCase )
    lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =t // 3600, (t // 60) % 60, t % 60
return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : Dict=300 ):
"""simple docstring"""
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def snake_case__ ( __lowerCamelCase : Tuple ):
"""simple docstring"""
lowerCamelCase__ : List[str] ='''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
lowerCamelCase__ : Any =f'''{elt:.6f}''' if isinstance(__lowerCamelCase , __lowerCamelCase ) else str(__lowerCamelCase )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
_a = 5
_a = 0.2
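    # warmup forces the first few updates to render unconditionally; update_every throttles later refreshes to roughly one every 0.2 seconds.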
def __init__( self : Any, lowerCamelCase : int, lowerCamelCase : Optional[str] = None, lowerCamelCase : bool = True, lowerCamelCase : Optional["NotebookTrainingTracker"] = None, lowerCamelCase : int = 300, )-> Tuple:
lowerCamelCase__ : List[Any] =total
lowerCamelCase__ : int ='''''' if prefix is None else prefix
lowerCamelCase__ : Dict =leave
lowerCamelCase__ : Any =parent
lowerCamelCase__ : Dict =width
lowerCamelCase__ : str =None
lowerCamelCase__ : str =None
lowerCamelCase__ : Tuple =None
def snake_case ( self : Optional[int], lowerCamelCase : int, lowerCamelCase : bool = False, lowerCamelCase : str = None )-> List[str]:
lowerCamelCase__ : Dict =value
if comment is not None:
lowerCamelCase__ : Optional[int] =comment
if self.last_value is None:
lowerCamelCase__ : str =time.time()
lowerCamelCase__ : Optional[Any] =value
lowerCamelCase__ : List[Any] =None
lowerCamelCase__ : Dict =self.warmup
lowerCamelCase__ : Tuple =1
self.update_bar(lowerCamelCase )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total ):
if self.first_calls > 0:
self.first_calls -= 1
lowerCamelCase__ : Optional[Any] =time.time()
lowerCamelCase__ : Optional[Any] =current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
lowerCamelCase__ : Optional[Any] =self.elapsed_time / (value - self.start_value)
else:
lowerCamelCase__ : Tuple =None
if value >= self.total:
lowerCamelCase__ : Dict =self.total
lowerCamelCase__ : Tuple =None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
lowerCamelCase__ : List[Any] =self.average_time_per_item * (self.total - value)
self.update_bar(lowerCamelCase )
lowerCamelCase__ : Any =value
lowerCamelCase__ : List[str] =current_time
if self.average_time_per_item is None:
lowerCamelCase__ : str =1
else:
lowerCamelCase__ : str =max(int(self.update_every / self.average_time_per_item ), 1 )
def snake_case ( self : List[str], lowerCamelCase : int, lowerCamelCase : Dict=None )-> Optional[int]:
lowerCamelCase__ : int =''' ''' * (len(str(self.total ) ) - len(str(lowerCamelCase ) )) + str(lowerCamelCase )
if self.elapsed_time is None:
lowerCamelCase__ : Optional[int] =F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
lowerCamelCase__ : Optional[Any] =F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
lowerCamelCase__ : Dict =(
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
            self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def snake_case ( self : List[Any] )-> str:
lowerCamelCase__ : Optional[int] =html_progress_bar(self.value, self.total, self.prefix, self.label, self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
lowerCamelCase__ : Tuple =disp.display(disp.HTML(self.html_code ), display_id=lowerCamelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def snake_case ( self : Union[str, Any] )-> Any:
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Any, lowerCamelCase : Optional[int], lowerCamelCase : Union[str, Any]=None )-> str:
super().__init__(lowerCamelCase )
lowerCamelCase__ : List[Any] =None if column_names is None else [column_names]
lowerCamelCase__ : Tuple =None
def snake_case ( self : int )-> int:
lowerCamelCase__ : Union[str, Any] =html_progress_bar(self.value, self.total, self.prefix, self.label, self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
lowerCamelCase__ : Union[str, Any] =disp.display(disp.HTML(self.html_code ), display_id=lowerCamelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def snake_case ( self : List[Any], lowerCamelCase : Any )-> Optional[int]:
if self.inner_table is None:
lowerCamelCase__ : List[str] =[list(values.keys() ), list(values.values() )]
else:
lowerCamelCase__ : Tuple =self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(lowerCamelCase )
lowerCamelCase__ : List[str] =columns
self.inner_table.append([values[c] for c in columns] )
def snake_case ( self : str, lowerCamelCase : Optional[Any], lowerCamelCase : Optional[int]=None, lowerCamelCase : Tuple=300 )-> List[Any]:
lowerCamelCase__ : Optional[Any] =NotebookProgressBar(lowerCamelCase, prefix=lowerCamelCase, parent=self, width=lowerCamelCase )
return self.child_bar
def snake_case ( self : Union[str, Any] )-> List[str]:
lowerCamelCase__ : List[str] =None
self.display()
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Optional[int] )-> Dict:
lowerCamelCase__ : Optional[int] =None
lowerCamelCase__ : List[Any] =None
lowerCamelCase__ : Dict =False
def snake_case ( self : int, lowerCamelCase : Optional[int], lowerCamelCase : int, lowerCamelCase : Tuple, **lowerCamelCase : List[Any] )-> List[str]:
lowerCamelCase__ : Optional[int] ='''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
lowerCamelCase__ : Union[str, Any] =0
lowerCamelCase__ : Tuple =0
lowerCamelCase__ : List[Any] =[self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
lowerCamelCase__ : List[str] =NotebookTrainingTracker(state.max_steps, lowerCamelCase )
def snake_case ( self : str, lowerCamelCase : Any, lowerCamelCase : Dict, lowerCamelCase : Optional[Any], **lowerCamelCase : Union[str, Any] )-> List[str]:
lowerCamelCase__ : Optional[int] =int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1, comment=F'''Epoch {epoch}/{state.num_train_epochs}''', force_update=self._force_next_update, )
lowerCamelCase__ : Optional[Any] =False
def snake_case ( self : Any, lowerCamelCase : Dict, lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : Union[str, Any]=None, **lowerCamelCase : List[Any] )-> Optional[int]:
if not has_length(lowerCamelCase ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
lowerCamelCase__ : Any =self.training_tracker.add_child(len(lowerCamelCase ) )
else:
lowerCamelCase__ : str =NotebookProgressBar(len(lowerCamelCase ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def snake_case ( self : List[Any], lowerCamelCase : Any, lowerCamelCase : Any, lowerCamelCase : str, **lowerCamelCase : List[Any] )-> Union[str, Any]:
if self.prediction_bar is not None:
self.prediction_bar.close()
lowerCamelCase__ : List[Any] =None
def snake_case ( self : List[Any], lowerCamelCase : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : Optional[int], lowerCamelCase : List[Any]=None, **lowerCamelCase : Dict )-> List[Any]:
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
lowerCamelCase__ : Dict ={'''Training Loss''': logs['''loss''']}
# First column is necessarily Step sine we're not in epoch eval strategy
lowerCamelCase__ : Tuple =state.global_step
self.training_tracker.write_line(lowerCamelCase )
def snake_case ( self : Dict, lowerCamelCase : int, lowerCamelCase : Optional[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any]=None, **lowerCamelCase : Optional[int] )-> Optional[int]:
if self.training_tracker is not None:
lowerCamelCase__ : str ={'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
lowerCamelCase__ : Any =log['''loss''']
break
if self.first_column == "Epoch":
lowerCamelCase__ : str =int(state.epoch )
else:
lowerCamelCase__ : List[Any] =state.global_step
lowerCamelCase__ : List[Any] ='''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
lowerCamelCase__ : str =re.sub(r'''\_loss$''', '''''', lowerCamelCase )
lowerCamelCase__ : Dict =metrics.pop('''total_flos''', lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =metrics.pop('''epoch''', lowerCamelCase )
lowerCamelCase__ : str =metrics.pop(F'''{metric_key_prefix}_runtime''', lowerCamelCase )
lowerCamelCase__ : Tuple =metrics.pop(F'''{metric_key_prefix}_samples_per_second''', lowerCamelCase )
lowerCamelCase__ : Optional[Any] =metrics.pop(F'''{metric_key_prefix}_steps_per_second''', lowerCamelCase )
lowerCamelCase__ : Any =metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''', lowerCamelCase )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
lowerCamelCase__ : Any =v
else:
lowerCamelCase__ : List[Any] =k.split('''_''' )
lowerCamelCase__ : Tuple =''' '''.join([part.capitalize() for part in splits[1:]] )
lowerCamelCase__ : List[str] =v
self.training_tracker.write_line(lowerCamelCase )
self.training_tracker.remove_child()
lowerCamelCase__ : List[Any] =None
# Evaluation takes a long time so we should force the next update.
lowerCamelCase__ : Union[str, Any] =True
def snake_case ( self : List[str], lowerCamelCase : List[str], lowerCamelCase : Optional[int], lowerCamelCase : str, **lowerCamelCase : str )-> Union[str, Any]:
self.training_tracker.update(
state.global_step, comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''', force_update=lowerCamelCase )
lowerCamelCase__ : int =None
| 717 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
_a = BlenderbotSmallConfig
_a = {}
_a = 'gelu'
def __init__( self : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : Dict=13, lowerCamelCase : Optional[Any]=7, lowerCamelCase : Optional[int]=True, lowerCamelCase : int=False, lowerCamelCase : Union[str, Any]=99, lowerCamelCase : str=32, lowerCamelCase : List[Any]=2, lowerCamelCase : Optional[int]=4, lowerCamelCase : Union[str, Any]=37, lowerCamelCase : str=0.1, lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Optional[Any]=20, lowerCamelCase : int=2, lowerCamelCase : Any=1, lowerCamelCase : Optional[Any]=0, )-> List[str]:
lowerCamelCase__ : Any =parent
lowerCamelCase__ : Dict =batch_size
lowerCamelCase__ : Optional[int] =seq_length
lowerCamelCase__ : Tuple =is_training
lowerCamelCase__ : Dict =use_labels
lowerCamelCase__ : List[Any] =vocab_size
lowerCamelCase__ : str =hidden_size
lowerCamelCase__ : str =num_hidden_layers
lowerCamelCase__ : Union[str, Any] =num_attention_heads
lowerCamelCase__ : Any =intermediate_size
lowerCamelCase__ : Dict =hidden_dropout_prob
lowerCamelCase__ : List[Any] =attention_probs_dropout_prob
lowerCamelCase__ : str =max_position_embeddings
lowerCamelCase__ : Optional[int] =eos_token_id
lowerCamelCase__ : str =pad_token_id
lowerCamelCase__ : Union[str, Any] =bos_token_id
def snake_case ( self : Any )-> Any:
lowerCamelCase__ : Any =ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
lowerCamelCase__ : Tuple =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
lowerCamelCase__ : Any =tf.concat([input_ids, eos_tensor], axis=1 )
lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : int =self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
lowerCamelCase__ : Optional[int] =prepare_blenderbot_small_inputs_dict(lowerCamelCase, lowerCamelCase, lowerCamelCase )
return config, inputs_dict
def snake_case ( self : Any, lowerCamelCase : str, lowerCamelCase : Any )-> Optional[Any]:
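        # Check that decoding with cached past_key_values matches a full forward pass over the concatenated sequence.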
lowerCamelCase__ : Union[str, Any] =TFBlenderbotSmallModel(config=lowerCamelCase ).get_decoder()
lowerCamelCase__ : List[Any] =inputs_dict['''input_ids''']
lowerCamelCase__ : Optional[int] =input_ids[:1, :]
lowerCamelCase__ : str =inputs_dict['''attention_mask'''][:1, :]
lowerCamelCase__ : Union[str, Any] =inputs_dict['''head_mask''']
lowerCamelCase__ : Optional[Any] =1
# first forward pass
lowerCamelCase__ : Dict =model(lowerCamelCase, attention_mask=lowerCamelCase, head_mask=lowerCamelCase, use_cache=lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ : List[str] =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCamelCase__ : Union[str, Any] =ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : Tuple =tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
# append to next input_ids and
lowerCamelCase__ : List[str] =tf.concat([input_ids, next_tokens], axis=-1 )
lowerCamelCase__ : str =tf.concat([attention_mask, next_attn_mask], axis=-1 )
lowerCamelCase__ : Optional[int] =model(lowerCamelCase, attention_mask=lowerCamelCase )[0]
lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase, attention_mask=lowerCamelCase, past_key_values=lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
# select random slice
lowerCamelCase__ : Tuple =int(ids_tensor((1,), output_from_past.shape[-1] ) )
lowerCamelCase__ : int =output_from_no_past[:, -3:, random_slice_idx]
lowerCamelCase__ : List[str] =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCamelCase, lowerCamelCase, rtol=1E-3 )
def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[int]=None , ):
"""simple docstring"""
if attention_mask is None:
lowerCamelCase__ : List[str] =tf.cast(tf.math.not_equal(__lowerCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowerCamelCase__ : str =tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowerCamelCase__ : int =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCamelCase__ : int =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCamelCase__ : List[str] =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_a = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
_a = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
_a = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
_a = True
_a = False
_a = False
def snake_case ( self : Any )-> str:
lowerCamelCase__ : Tuple =TFBlenderbotSmallModelTester(self )
lowerCamelCase__ : Union[str, Any] =ConfigTester(self, config_class=lowerCamelCase )
def snake_case ( self : Any )-> Optional[int]:
self.config_tester.run_common_tests()
def snake_case ( self : int )-> str:
lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase )
@require_tokenizers
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
_a = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
_a = 'facebook/blenderbot_small-90M'
@cached_property
def snake_case ( self : Any )-> List[Any]:
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
@cached_property
def snake_case ( self : int )-> List[Any]:
lowerCamelCase__ : str =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def snake_case ( self : Tuple )-> int:
lowerCamelCase__ : Dict =self.tokenizer(self.src_text, return_tensors='''tf''' )
lowerCamelCase__ : Any =self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=lowerCamelCase, )
lowerCamelCase__ : Any =self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=lowerCamelCase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 625 | 0 |
"""simple docstring"""
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Any, lowerCamelCase : int )-> List[str]:
lowerCamelCase__ : Any =n
lowerCamelCase__ : Tuple =[None] * self.n
lowerCamelCase__ : Tuple =0 # index of the first element
lowerCamelCase__ : Union[str, Any] =0
lowerCamelCase__ : Union[str, Any] =0
def __len__( self : Optional[Any] )-> int:
return self.size
def snake_case ( self : str )-> bool:
return self.size == 0
def snake_case ( self : str )-> str:
return False if self.is_empty() else self.array[self.front]
def snake_case ( self : List[str], lowerCamelCase : Optional[Any] )-> Optional[Any]:
if self.size >= self.n:
raise Exception('''QUEUE IS FULL''' )
lowerCamelCase__ : Any =data
lowerCamelCase__ : int =(self.rear + 1) % self.n
self.size += 1
return self
def snake_case ( self : List[Any] )-> Dict:
if self.size == 0:
raise Exception('''UNDERFLOW''' )
lowerCamelCase__ : Dict =self.array[self.front]
lowerCamelCase__ : Optional[int] =None
lowerCamelCase__ : List[str] =(self.front + 1) % self.n
self.size -= 1
return temp
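# A hedged, self-contained usage sketch of the circular buffer above. The
# obfuscation in this cell collapses every method onto the name `snake_case`,
# so later definitions shadow earlier ones; the de-mangled class below restates
# the same logic purely for illustration and is not part of the original sample.
class CircularQueue:
    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * n
        self.front = 0  # index of the first element
        self.rear = 0   # index one past the last element, modulo n
        self.size = 0

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp


queue = CircularQueue(3)
queue.enqueue(10).enqueue(20).enqueue(30)  # chaining works because enqueue returns self
assert queue.dequeue() == 10               # front advances modulo n
queue.enqueue(40)                          # the freed slot at index 0 is reused
assert [queue.dequeue() for _ in range(3)] == [20, 30, 40]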
| 718 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : list[int] ):
"""simple docstring"""
# 1. Validate that path exists between current and next vertices
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[int] , __lowerCamelCase : int ):
"""simple docstring"""
# Base Case
if curr_ind == len(__lowerCamelCase ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(__lowerCamelCase ) ):
if valid_connection(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
# Insert current vertex into path as next transition
lowerCamelCase__ : Tuple =next_ver
# Validate created path
if util_hamilton_cycle(__lowerCamelCase , __lowerCamelCase , curr_ind + 1 ):
return True
# Backtrack
lowerCamelCase__ : int =-1
return False
def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : int = 0 ):
"""simple docstring"""
lowerCamelCase__ : Tuple =[-1] * (len(__lowerCamelCase ) + 1)
# initialize start and end of path with starting index
lowerCamelCase__ : Union[str, Any] =start_index
# evaluate: if a Hamiltonian cycle is found return the path, otherwise return an empty list
return path if util_hamilton_cycle(__lowerCamelCase , __lowerCamelCase , 1 ) else []
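# Hedged, self-contained restatement of the backtracking search above. The three
# helpers in this cell all share one mangled name and shadow each other, so the
# de-mangled names below are illustrative stand-ins, not part of the sample.
def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    def valid(next_ver: int, curr_ind: int, path: list[int]) -> bool:
        # the edge must exist and the vertex must not already be on the path
        return graph[path[curr_ind - 1]][next_ver] == 1 and next_ver not in path

    def search(path: list[int], curr_ind: int) -> bool:
        if curr_ind == len(graph):  # all vertices placed: try to close the cycle
            return graph[path[curr_ind - 1]][path[0]] == 1
        for next_ver in range(len(graph)):
            if valid(next_ver, curr_ind, path):
                path[curr_ind] = next_ver
                if search(path, curr_ind + 1):
                    return True
                path[curr_ind] = -1  # backtrack
        return False

    path = [-1] * (len(graph) + 1)
    path[0] = path[-1] = start_index  # the start vertex closes the cycle
    return path if search(path, 1) else []


graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
# The path has len(graph) + 1 entries because the start vertex is repeated at the end.
assert hamilton_cycle(graph) == [0, 1, 2, 4, 3, 0]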
| 625 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : int = logging.get_logger(__name__)
_lowercase : Any = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = 'roformer'
def __init__( self : Optional[Any], lowerCamelCase : Union[str, Any]=5_0000, lowerCamelCase : Tuple=None, lowerCamelCase : Dict=768, lowerCamelCase : Tuple=12, lowerCamelCase : List[Any]=12, lowerCamelCase : List[Any]=3072, lowerCamelCase : str="gelu", lowerCamelCase : Any=0.1, lowerCamelCase : int=0.1, lowerCamelCase : str=1536, lowerCamelCase : Tuple=2, lowerCamelCase : int=0.02, lowerCamelCase : str=1E-12, lowerCamelCase : List[str]=0, lowerCamelCase : Optional[int]=False, lowerCamelCase : List[str]=True, **lowerCamelCase : List[str], )-> List[str]:
super().__init__(pad_token_id=lowerCamelCase, **lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =vocab_size
lowerCamelCase__ : Any =hidden_size if embedding_size is None else embedding_size
lowerCamelCase__ : Tuple =hidden_size
lowerCamelCase__ : Tuple =num_hidden_layers
lowerCamelCase__ : Union[str, Any] =num_attention_heads
lowerCamelCase__ : Optional[Any] =hidden_act
lowerCamelCase__ : Any =intermediate_size
lowerCamelCase__ : Union[str, Any] =hidden_dropout_prob
lowerCamelCase__ : Union[str, Any] =attention_probs_dropout_prob
lowerCamelCase__ : int =max_position_embeddings
lowerCamelCase__ : List[Any] =type_vocab_size
lowerCamelCase__ : Tuple =initializer_range
lowerCamelCase__ : Optional[int] =layer_norm_eps
lowerCamelCase__ : Optional[Any] =rotary_value
lowerCamelCase__ : Optional[int] =use_cache
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
@property
def snake_case ( self : List[Any] )-> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCamelCase__ : Any ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase__ : int ={0: '''batch''', 1: '''sequence'''}
lowerCamelCase__ : List[str] ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
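# Hedged usage sketch. The de-mangled names RoFormerConfig / RoFormerOnnxConfig
# are assumptions inferred from the 'roformer' model type above, not part of
# this cell; the expected mapping is read straight off the `inputs` property:
#
#     config = RoFormerConfig(vocab_size=50000, hidden_size=768)
#     onnx_config = RoFormerOnnxConfig(config)
#     onnx_config.inputs
#     # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#     #              ('attention_mask', {0: 'batch', 1: 'sequence'}),
#     #              ('token_type_ids', {0: 'batch', 1: 'sequence'})])
#
# The intent of the branch above is that, for the "multiple-choice" task, the
# dynamic axes gain a middle 'choice' dimension: {0: 'batch', 1: 'choice', 2: 'sequence'}.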
| 719 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : List[str] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_lowercase : List[str] = 2_5_0_0_0_4
_lowercase : Optional[Any] = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_a = MBartTokenizer
_a = MBartTokenizerFast
_a = True
_a = True
def snake_case ( self : Tuple )-> Union[str, Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase__ : Union[str, Any] =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case ( self : Dict )-> Union[str, Any]:
lowerCamelCase__ : Any =MBartTokenizer(lowerCamelCase, keep_accents=lowerCamelCase )
lowerCamelCase__ : List[Any] =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
lowerCamelCase__ : str =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
], )
lowerCamelCase__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(lowerCamelCase )
self.assertListEqual(
lowerCamelCase, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
], )
lowerCamelCase__ : str =tokenizer.convert_ids_to_tokens(lowerCamelCase )
self.assertListEqual(
lowerCamelCase, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
], )
def snake_case ( self : Tuple )-> List[Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase__ : int =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase__ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
lowerCamelCase__ : str =self.tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
lowerCamelCase__ : List[str] =tempfile.mkdtemp()
lowerCamelCase__ : Union[str, Any] =tokenizer_r.save_pretrained(lowerCamelCase )
lowerCamelCase__ : Optional[int] =tokenizer_p.save_pretrained(lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
lowerCamelCase__ : List[str] =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
# Checks everything loads correctly in the same way
lowerCamelCase__ : Any =tokenizer_r.from_pretrained(lowerCamelCase )
lowerCamelCase__ : Dict =tokenizer_p.from_pretrained(lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase )
# Save tokenizer rust, legacy_format=True
lowerCamelCase__ : Dict =tempfile.mkdtemp()
lowerCamelCase__ : List[str] =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
lowerCamelCase__ : Tuple =tokenizer_p.save_pretrained(lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
# Checks everything loads correctly in the same way
lowerCamelCase__ : Optional[int] =tokenizer_r.from_pretrained(lowerCamelCase )
lowerCamelCase__ : Any =tokenizer_p.from_pretrained(lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
shutil.rmtree(lowerCamelCase )
# Save tokenizer rust, legacy_format=False
lowerCamelCase__ : Optional[int] =tempfile.mkdtemp()
lowerCamelCase__ : int =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
lowerCamelCase__ : Dict =tokenizer_p.save_pretrained(lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase__ : Dict =tokenizer_r.from_pretrained(lowerCamelCase )
lowerCamelCase__ : int =tokenizer_p.from_pretrained(lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
shutil.rmtree(lowerCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
_a = 'facebook/mbart-large-en-ro'
_a = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
_a = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
_a = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def snake_case ( cls : List[Any] )-> Optional[int]:
lowerCamelCase__ : MBartTokenizer =MBartTokenizer.from_pretrained(
cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''' )
lowerCamelCase__ : Optional[int] =1
return cls
def snake_case ( self : Optional[Any] )-> List[str]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 25_0020 )
def snake_case ( self : Optional[int] )-> List[Any]:
lowerCamelCase__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens, lowerCamelCase )
def snake_case ( self : Optional[Any] )-> str:
self.assertIn(lowerCamelCase, self.tokenizer.all_special_ids )
lowerCamelCase__ : Optional[int] =[RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
lowerCamelCase__ : Any =self.tokenizer.decode(lowerCamelCase, skip_special_tokens=lowerCamelCase )
lowerCamelCase__ : str =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCamelCase )
self.assertEqual(lowerCamelCase, lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token, lowerCamelCase )
def snake_case ( self : Tuple )-> int:
lowerCamelCase__ : Optional[int] =['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0], lowerCamelCase )
lowerCamelCase__ : Dict =10
lowerCamelCase__ : Optional[int] =self.tokenizer(lowerCamelCase, max_length=lowerCamelCase, truncation=lowerCamelCase ).input_ids[0]
self.assertEqual(ids[-2], 2 )
self.assertEqual(ids[-1], lowerCamelCase )
self.assertEqual(len(lowerCamelCase ), lowerCamelCase )
def snake_case ( self : int )-> Any:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [25_0026, 25_0001] )
def snake_case ( self : Tuple )-> Optional[Any]:
lowerCamelCase__ : int =tempfile.mkdtemp()
lowerCamelCase__ : Optional[int] =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase )
lowerCamelCase__ : Optional[Any] =MBartTokenizer.from_pretrained(lowerCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCamelCase )
@require_torch
def snake_case ( self : Optional[Any] )-> Tuple:
lowerCamelCase__ : Optional[Any] =self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, return_tensors='''pt''' )
lowerCamelCase__ : Dict =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def snake_case ( self : Optional[Any] )-> Any:
lowerCamelCase__ : str =self.tokenizer(
self.src_text, text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=len(self.expected_src_tokens ), return_tensors='''pt''', )
lowerCamelCase__ : List[Any] =shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase, lowerCamelCase )
self.assertEqual((2, 14), batch.input_ids.shape )
self.assertEqual((2, 14), batch.attention_mask.shape )
lowerCamelCase__ : Any =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, lowerCamelCase )
self.assertEqual(2, batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [] )
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE] )
def snake_case ( self : List[Any] )-> Dict:
lowerCamelCase__ : Any =self.tokenizer(self.src_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=3, return_tensors='''pt''' )
lowerCamelCase__ : Tuple =self.tokenizer(
text_target=self.tgt_text, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=10, return_tensors='''pt''' )
lowerCamelCase__ : Union[str, Any] =targets['''input_ids''']
lowerCamelCase__ : List[Any] =shift_tokens_right(lowerCamelCase, self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.decoder_input_ids.shape[1], 10 )
@require_torch
def snake_case ( self : Optional[int] )-> List[Any]:
lowerCamelCase__ : str =self.tokenizer._build_translation_inputs(
'''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCamelCase ), {
# A, test, EOS, en_XX
'''input_ids''': [[62, 3034, 2, 25_0004]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_0001,
}, )
| 625 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def snake_case__ ( __lowerCamelCase : Tuple ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =[2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowerCamelCase__ : Union[str, Any] =True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase__ : Optional[Any] =True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase__ : Optional[int] =True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowerCamelCase__ : Union[str, Any] =[3, 3, 3, 3]
lowerCamelCase__ : Dict =[5, 5, 5, 5]
elif "fl4" in model_name:
lowerCamelCase__ : List[str] =[4, 4, 4, 4]
lowerCamelCase__ : int =[3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowerCamelCase__ : List[str] =[3, 3, 3, 3]
if "lrf" in model_name:
lowerCamelCase__ : Dict =[3, 3, 3, 3]
else:
lowerCamelCase__ : Optional[int] =[2, 2, 2, 2]
if "tiny" in model_name:
lowerCamelCase__ : Dict =96
elif "small" in model_name:
lowerCamelCase__ : Any =96
elif "base" in model_name:
lowerCamelCase__ : str =128
elif "large" in model_name:
lowerCamelCase__ : Tuple =192
elif "xlarge" in model_name:
lowerCamelCase__ : Optional[Any] =256
elif "huge" in model_name:
lowerCamelCase__ : Optional[Any] =352
# set label information
lowerCamelCase__ : List[Any] ='''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowerCamelCase__ : int ='''imagenet-22k-id2label.json'''
else:
lowerCamelCase__ : Optional[int] ='''imagenet-1k-id2label.json'''
lowerCamelCase__ : List[Any] =json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
lowerCamelCase__ : Dict ={int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : int ={v: k for k, v in idalabel.items()}
lowerCamelCase__ : int =FocalNetConfig(
embed_dim=__lowerCamelCase , depths=__lowerCamelCase , focal_levels=__lowerCamelCase , focal_windows=__lowerCamelCase , use_conv_embed=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase , use_post_layernorm=__lowerCamelCase , use_layerscale=__lowerCamelCase , )
return config
def snake_case__ ( __lowerCamelCase : str ):
"""simple docstring"""
if "patch_embed.proj" in name:
lowerCamelCase__ : Union[str, Any] =name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCamelCase__ : Tuple =name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
lowerCamelCase__ : str ='''encoder.''' + name
if "encoder.layers" in name:
lowerCamelCase__ : Optional[int] =name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
lowerCamelCase__ : Optional[Any] =name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
lowerCamelCase__ : Optional[Any] =name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowerCamelCase__ : Optional[Any] =name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowerCamelCase__ : Union[str, Any] =name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowerCamelCase__ : str =name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
lowerCamelCase__ : int ='''layernorm.weight'''
if name == "norm.bias":
lowerCamelCase__ : int ='''layernorm.bias'''
if "head" in name:
lowerCamelCase__ : Optional[int] =name.replace('''head''' , '''classifier''' )
else:
lowerCamelCase__ : Union[str, Any] ='''focalnet.''' + name
return name
def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any]=False ):
"""simple docstring"""
lowerCamelCase__ : Dict ={
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
lowerCamelCase__ : Optional[Any] =model_name_to_url[model_name]
print('''Checkpoint URL: ''' , __lowerCamelCase )
lowerCamelCase__ : List[str] =torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase__ : List[Any] =state_dict.pop(__lowerCamelCase )
lowerCamelCase__ : Any =val
lowerCamelCase__ : Tuple =get_focalnet_config(__lowerCamelCase )
lowerCamelCase__ : Optional[Any] =FocalNetForImageClassification(__lowerCamelCase )
model.eval()
# load state dict
model.load_state_dict(__lowerCamelCase )
# verify conversion
lowerCamelCase__ : Any ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase__ : Dict =BitImageProcessor(
do_resize=__lowerCamelCase , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=__lowerCamelCase , crop_size=224 , do_normalize=__lowerCamelCase , image_mean=__lowerCamelCase , image_std=__lowerCamelCase , )
lowerCamelCase__ : Any =Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
lowerCamelCase__ : Optional[int] =processor(images=__lowerCamelCase , return_tensors='''pt''' )
lowerCamelCase__ : List[str] =transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
lowerCamelCase__ : Dict =image_transforms(__lowerCamelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , __lowerCamelCase , atol=1e-4 )
lowerCamelCase__ : Any =model(**__lowerCamelCase )
lowerCamelCase__ : List[str] =outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowerCamelCase__ : Tuple =torch.tensor([0.21_66, -0.43_68, 0.21_91] )
elif model_name == "focalnet-tiny-lrf":
lowerCamelCase__ : Union[str, Any] =torch.tensor([1.16_69, 0.01_25, -0.16_95] )
elif model_name == "focalnet-small":
lowerCamelCase__ : Optional[Any] =torch.tensor([0.49_17, -0.04_30, 0.13_41] )
elif model_name == "focalnet-small-lrf":
lowerCamelCase__ : Any =torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
elif model_name == "focalnet-base":
lowerCamelCase__ : Any =torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
elif model_name == "focalnet-base-lrf":
lowerCamelCase__ : Tuple =torch.tensor([0.53_06, -0.04_83, -0.39_28] )
assert torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
print(f'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(f'''{model_name}''' )
processor.push_to_hub(f'''{model_name}''' )
if __name__ == "__main__":
_lowercase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
_lowercase : Any = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 720 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : str ):
"""simple docstring"""
return " ".join(
''''''.join(word[::-1] ) if len(__lowerCamelCase ) > 4 else word for word in sentence.split() )
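# e.g. reverse_long_words("Hey wollef sroirraw") returns "Hey fellow warriors":
# only words longer than four characters are reversed, as exercised below.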
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 625 | 0 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : Any = logging.get_logger(__name__)
_lowercase : Dict = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
_lowercase : List[Any] = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def snake_case__ ( __lowerCamelCase : Tuple ):
"""simple docstring"""
lowerCamelCase__ : List[str] =torch.load(__lowerCamelCase , map_location='''cpu''' )
return sd
def snake_case__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int]=rename_keys_prefix ):
"""simple docstring"""
lowerCamelCase__ : Dict =OrderedDict()
lowerCamelCase__ : str =torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
lowerCamelCase__ : Tuple =key
for name_pair in rename_keys_prefix:
lowerCamelCase__ : Optional[Any] =new_key.replace(name_pair[0] , name_pair[1] )
lowerCamelCase__ : List[str] =d[key]
if key == "bert.cls.predictions.decoder.weight":
# The old BERT code didn't have `decoder.bias`; it is added back here separately
lowerCamelCase__ : Optional[Any] =new_d['''cls.predictions.bias''']
return new_d
@torch.no_grad()
def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
assert (
checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
lowerCamelCase__ : Dict ='''pretraining'''
if "vcr" in checkpoint_path:
lowerCamelCase__ : str ={'''visual_embedding_dim''': 512}
elif "vqa_advanced" in checkpoint_path:
lowerCamelCase__ : int ={'''visual_embedding_dim''': 2048}
elif "vqa" in checkpoint_path:
lowerCamelCase__ : int ={'''visual_embedding_dim''': 2048}
elif "nlvr" in checkpoint_path:
lowerCamelCase__ : Union[str, Any] ={'''visual_embedding_dim''': 1024}
else:
raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
lowerCamelCase__ : Union[str, Any] ={'''visual_embedding_dim''': 512}
lowerCamelCase__ : List[str] ='''multichoice'''
elif "vqa_advanced" in checkpoint_path:
lowerCamelCase__ : List[Any] ={'''visual_embedding_dim''': 2048}
lowerCamelCase__ : List[Any] ='''vqa_advanced'''
elif "vqa" in checkpoint_path:
lowerCamelCase__ : Any ={'''visual_embedding_dim''': 2048, '''num_labels''': 3129}
lowerCamelCase__ : Optional[Any] ='''vqa'''
elif "nlvr" in checkpoint_path:
lowerCamelCase__ : List[str] ={
'''visual_embedding_dim''': 1024,
'''num_labels''': 2,
}
lowerCamelCase__ : Any ='''nlvr'''
lowerCamelCase__ : Tuple =VisualBertConfig(**__lowerCamelCase )
# Load State Dict
lowerCamelCase__ : Any =load_state_dict(__lowerCamelCase )
lowerCamelCase__ : Any =get_new_dict(__lowerCamelCase , __lowerCamelCase )
if model_type == "pretraining":
lowerCamelCase__ : int =VisualBertForPreTraining(__lowerCamelCase )
elif model_type == "vqa":
lowerCamelCase__ : Optional[int] =VisualBertForQuestionAnswering(__lowerCamelCase )
elif model_type == "nlvr":
lowerCamelCase__ : str =VisualBertForVisualReasoning(__lowerCamelCase )
elif model_type == "multichoice":
lowerCamelCase__ : Tuple =VisualBertForMultipleChoice(__lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
# Save Checkpoints
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
_lowercase : Optional[int] = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 721 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : int = 10 , __lowerCamelCase : int = 22 ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =range(1 , __lowerCamelCase )
lowerCamelCase__ : str =range(1 , __lowerCamelCase )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
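# Worked example: 7**4 = 2401 has exactly 4 digits, so (base=7, power=4) is
# counted, while 10**n always has n + 1 digits and never qualifies, which is why
# bases above 9 need not be searched. With the default bounds the count is 49
# (Project Euler problem 63).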
if __name__ == "__main__":
print(f'{solution(1_0, 2_2) = }')
| 625 | 0 |
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=snake_case ):
"""simple docstring"""
__lowerCamelCase = ['note_seq']
def __init__( self , *lowercase , **lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["note_seq"] )
@classmethod
def UpperCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["note_seq"] )
@classmethod
def UpperCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["note_seq"] )
| 626 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 626 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCAmelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple ) -> List[List[ImageInput]]:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(SCREAMING_SNAKE_CASE_ ):
return [[videos]]
raise ValueError(F'Could not make batched video from {videos}' )
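# Hedged illustration of the normalization above, which always yields a list of
# videos where each video is a list of frames:
#   a single image            -> [[image]]   (one video with one frame)
#   a list of images          -> [images]    (one video)
#   a list of lists of images -> unchanged   (already batched)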
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = ['pixel_values']
def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BILINEAR , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = True , lowercase = None , lowercase = None , **lowercase , ) -> None:
'''simple docstring'''
super().__init__(**lowercase )
A__ = size if size is not None else {"shortest_edge": 256}
A__ = get_size_dict(lowercase , default_to_square=lowercase )
A__ = crop_size if crop_size is not None else {"height": 224, "width": 224}
A__ = get_size_dict(lowercase , param_name="crop_size" )
A__ = do_resize
A__ = size
A__ = do_center_crop
A__ = crop_size
A__ = resample
A__ = do_rescale
A__ = rescale_factor
A__ = offset
A__ = do_normalize
A__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase ( self , lowercase , lowercase , lowercase = PILImageResampling.BILINEAR , lowercase = None , **lowercase , ) -> np.ndarray:
'''simple docstring'''
A__ = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" in size:
A__ = get_resize_output_image_size(lowercase , size["shortest_edge"] , default_to_square=lowercase )
elif "height" in size and "width" in size:
A__ = (size["height"], size["width"])
else:
raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def UpperCamelCase ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray:
'''simple docstring'''
A__ = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(lowercase , size=(size["height"], size["width"]) , data_format=lowercase , **lowercase )
def UpperCamelCase ( self , lowercase , lowercase , lowercase = True , lowercase = None , **lowercase , ) -> Any:
'''simple docstring'''
A__ = image.astype(np.floataa )
if offset:
A__ = image - (scale / 2)
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , ) -> np.ndarray:
'''simple docstring'''
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
A__ = to_numpy_array(lowercase )
if do_resize:
A__ = self.resize(image=lowercase , size=lowercase , resample=lowercase )
if do_center_crop:
A__ = self.center_crop(lowercase , size=lowercase )
if do_rescale:
A__ = self.rescale(image=lowercase , scale=lowercase , offset=lowercase )
if do_normalize:
A__ = self.normalize(image=lowercase , mean=lowercase , std=lowercase )
A__ = to_channel_dimension_format(lowercase , lowercase )
return image
def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ) -> PIL.Image.Image:
'''simple docstring'''
A__ = do_resize if do_resize is not None else self.do_resize
A__ = resample if resample is not None else self.resample
A__ = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = offset if offset is not None else self.offset
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = size if size is not None else self.size
A__ = get_size_dict(lowercase , default_to_square=lowercase )
A__ = crop_size if crop_size is not None else self.crop_size
A__ = get_size_dict(lowercase , param_name="crop_size" )
if not valid_images(lowercase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
A__ = make_batched(lowercase )
A__ = [
[
self._preprocess_image(
image=lowercase , do_resize=lowercase , size=lowercase , resample=lowercase , do_center_crop=lowercase , crop_size=lowercase , do_rescale=lowercase , rescale_factor=lowercase , offset=lowercase , do_normalize=lowercase , image_mean=lowercase , image_std=lowercase , data_format=lowercase , )
for img in video
]
for video in videos
]
A__ = {"pixel_values": videos}
return BatchFeature(data=lowercase , tensor_type=lowercase )
| 626 |
from math import factorial
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int = 1_0_0 ) -> int:
'''simple docstring'''
return sum(map(int , str(factorial(SCREAMING_SNAKE_CASE_ ) ) ) )
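# Sanity check: 10! = 3628800, whose digits sum to 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27;
# for the default input of 100, the digit sum of 100! is 648 (Project Euler 20).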
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 626 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 626 |
lowerCAmelCase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: bytes ) -> bytes:
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
A__ = F'a bytes-like object is required, not \'{data.__class__.__name__}\''
raise TypeError(SCREAMING_SNAKE_CASE_ )
A__ = "".join(bin(SCREAMING_SNAKE_CASE_ )[2:].zfill(8 ) for byte in data )
A__ = len(SCREAMING_SNAKE_CASE_ ) % 6 != 0
if padding_needed:
# The padding that will be added later
A__ = b"=" * ((6 - len(SCREAMING_SNAKE_CASE_ ) % 6) // 2)
# Pad binary_stream with filler bits (zeros here, though any bits would do)
# to make its length a multiple of 6.
binary_stream += "0" * (6 - len(SCREAMING_SNAKE_CASE_ ) % 6)
else:
A__ = b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 6 ) ).encode()
+ padding
)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> bytes:
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
A__ = (
"argument should be a bytes-like object or ASCII string, "
F'not \'{encoded_data.__class__.__name__}\''
)
raise TypeError(SCREAMING_SNAKE_CASE_ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
try:
A__ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
A__ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(SCREAMING_SNAKE_CASE_ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
A__ = encoded_data[:-padding]
A__ = "".join(
bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE_ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
A__ = "".join(
bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE_ ) )[2:].zfill(6 ) for char in encoded_data )
A__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 8 )
]
return bytes(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
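# Worked example (hedged; `base64_encode`/`base64_decode` are de-mangled stand-ins
# for the two functions above, which share one obfuscated name in this cell):
# b"Hi" -> bits 01001000 01101001 (16 bits); 16 % 6 != 0, so two zero bits are
# appended and one "=" is emitted. Chunks 010010 | 000110 | 100100 give charset
# indices 18, 6, 36 -> "SGk", so:
#     base64_encode(b"Hi")   == b"SGk="
#     base64_decode(b"SGk=") == b"Hi"
# matching the standard library's base64.b64encode(b"Hi") == b"SGk=".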
| 626 | 1 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {"""vocab_file""": """vocab.json"""}
lowerCAmelCase__ = {
"""vocab_file""": {
"""mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""",
}
}
lowerCAmelCase__ = {"""mgp-str""": 2_7}
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , lowercase , lowercase="[GO]" , lowercase="[GO]" , lowercase="[s]" , lowercase="[GO]" , **lowercase ) -> int:
'''simple docstring'''
super().__init__(
unk_token=lowercase , bos_token=lowercase , eos_token=lowercase , pad_token=lowercase , **lowercase , )
with open(lowercase , encoding="utf-8" ) as vocab_handle:
A__ = json.load(lowercase )
A__ = {v: k for k, v in self.vocab.items()}
@property
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
return len(self.vocab )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def UpperCamelCase ( self , lowercase ) -> int:
'''simple docstring'''
A__ = []
for s in text:
char_tokens.extend(lowercase )
return char_tokens
def UpperCamelCase ( self , lowercase ) -> int:
'''simple docstring'''
return self.vocab.get(lowercase , self.vocab.get(self.unk_token ) )
def UpperCamelCase ( self , lowercase ) -> Dict:
'''simple docstring'''
return self.decoder.get(lowercase )
def UpperCamelCase ( self , lowercase , lowercase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowercase ):
logger.error("Vocabulary path ({}) should be a directory".format(lowercase ) )
return
A__ = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(lowercase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowercase , ensure_ascii=lowercase ) + "\n" )
return (vocab_file,)
| 626 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
__lowerCamelCase = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = field(default=snake_case , metadata={'help': 'The input training data file (a text file).'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'The maximum total input sequence length after tokenization. If passed, sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'Whether to pad all samples to the maximum sentence length. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
'efficient on GPU but very bad for TPU.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if self.train_file is not None:
A__ = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
A__ = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = True
__lowerCamelCase = None
__lowerCamelCase = None
def __call__( self , lowercase ) -> Tuple:
'''simple docstring'''
A__ = "label" if "label" in features[0].keys() else "labels"
A__ = [feature.pop(lowercase ) for feature in features]
A__ = len(lowercase )
A__ = len(features[0]["input_ids"] )
A__ = [
[{k: v[i] for k, v in feature.items()} for i in range(lowercase )] for feature in features
]
A__ = list(chain(*lowercase ) )
A__ = self.tokenizer.pad(
lowercase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
A__ = {k: v.view(lowercase , lowercase , -1 ) for k, v in batch.items()}
# Add back labels
A__ = torch.tensor(lowercase , dtype=torch.intaa )
return batch
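# Hedged shape sketch for the collator above: with a batch of 8 examples, 4
# choices each, and up to 128 tokens per choice, features arrive as 8 items of
# shape (4, 128); they are flattened to (32, 128) so the tokenizer can pad them
# as ordinary sequences, reshaped back to (8, 4, 128), and the popped labels are
# re-attached as an int64 tensor of shape (8,).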
def lowerCAmelCase__ ( ) -> List[Any]:
'''simple docstring'''
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__ , A__ , A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__ , A__ , A__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A__ = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE_ )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
A__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
A__ = {}
if data_args.train_file is not None:
A__ = data_args.train_file
if data_args.validation_file is not None:
A__ = data_args.validation_file
A__ = data_args.train_file.split("." )[-1]
A__ = load_dataset(
SCREAMING_SNAKE_CASE_ , data_files=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
A__ = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
A__ = [F'ending{i}' for i in range(4 )]
A__ = "sent1"
A__ = "sent2"
if data_args.max_seq_length is None:
A__ = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
A__ = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
A__ = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(SCREAMING_SNAKE_CASE_: Optional[Any] ):
A__ = [[context] * 4 for context in examples[context_name]]
A__ = examples[question_header_name]
A__ = [
[F'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(SCREAMING_SNAKE_CASE_ )
]
# Flatten out
A__ = list(chain(*SCREAMING_SNAKE_CASE_ ) )
A__ = list(chain(*SCREAMING_SNAKE_CASE_ ) )
# Tokenize
A__ = tokenizer(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
A__ = raw_datasets["train"]
if data_args.max_train_samples is not None:
A__ = min(len(SCREAMING_SNAKE_CASE_ ) , data_args.max_train_samples )
A__ = train_dataset.select(range(SCREAMING_SNAKE_CASE_ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
A__ = train_dataset.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
A__ = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
A__ = min(len(SCREAMING_SNAKE_CASE_ ) , data_args.max_eval_samples )
A__ = eval_dataset.select(range(SCREAMING_SNAKE_CASE_ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
A__ = eval_dataset.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
A__ = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(SCREAMING_SNAKE_CASE_: str ):
A__ , A__ = eval_predictions
A__ = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
A__ = Trainer(
model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , compute_metrics=SCREAMING_SNAKE_CASE_ , )
# Training
if training_args.do_train:
A__ = None
if training_args.resume_from_checkpoint is not None:
A__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A__ = last_checkpoint
A__ = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ )
trainer.save_model() # Saves the tokenizer too for easy upload
A__ = train_result.metrics
A__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE_ )
)
A__ = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
trainer.log_metrics("train" , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics("train" , SCREAMING_SNAKE_CASE_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
A__ = trainer.evaluate()
A__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE_ )
A__ = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
trainer.log_metrics("eval" , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics("eval" , SCREAMING_SNAKE_CASE_ )
A__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE_ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] ) -> Dict:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 626 | 1 |
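The custom collator above is the interesting piece of the script: each multiple-choice example carries four candidate sequences, which are flattened into one padded batch and then reshaped back to (batch, num_choices, seq_len). A minimal sketch of that round trip on a hypothetical toy batch, without the real tokenizer:
import torch
features = [  # two examples, four choices each, variable lengths
    {"input_ids": [[1, 2], [1, 3], [1, 4], [1, 5]], "label": 0},
    {"input_ids": [[6, 7, 8], [6, 9, 9], [6, 7, 7], [6, 8, 8]], "label": 2},
]
labels = [f.pop("label") for f in features]
flat = [seq for f in features for seq in f["input_ids"]]  # 8 sequences
max_len = max(len(s) for s in flat)
padded = torch.tensor([s + [0] * (max_len - len(s)) for s in flat])
batch = {
    "input_ids": padded.view(len(labels), 4, -1),  # back to (2, 4, 3)
    "labels": torch.tensor(labels, dtype=torch.int64),
}
assert batch["input_ids"].shape == (2, 4, 3)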
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowerCAmelCase__ = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
lowerCAmelCase__ = {
# fairseq:
"""wmt19-ru-en""": {"""length_penalty""": 1.1},
"""wmt19-en-ru""": {"""length_penalty""": 1.1_5},
"""wmt19-en-de""": {"""length_penalty""": 1.0},
"""wmt19-de-en""": {"""length_penalty""": 1.1},
# allenai:
"""wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
"""wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
"""wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
# this remaps the different models to their organization names
lowerCAmelCase__ = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowerCAmelCase__ = """facebook"""
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
lowerCAmelCase__ = """allenai"""
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int ) -> Optional[int]:
'''simple docstring'''
A__ = dict((re.sub(R"@@$" , "" , SCREAMING_SNAKE_CASE_ ), v) if k.endswith("@@" ) else (re.sub(R"$" , "</w>" , SCREAMING_SNAKE_CASE_ ), v) for k, v in d.items() )
A__ = "<s> <pad> </s> <unk>".split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
A__ = d[k] # restore
return da
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Dict ) -> Optional[int]:
'''simple docstring'''
assert os.path.exists(SCREAMING_SNAKE_CASE_ )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
A__ = basename(SCREAMING_SNAKE_CASE_ )
A__ = dirname(SCREAMING_SNAKE_CASE_ )
A__ = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
A__ = cls.hub_models()
A__ = {"bpe": "fastbpe", "tokenizer": "moses"}
A__ = "."
# note: the model dump is old; fairseq has since upgraded its model format, and loading
# does a whole lot of rewrites and splits on the saved weights, therefore we can't use
# torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'using checkpoint {checkpoint_file}' )
A__ = hub_utils.from_pretrained(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , archive_map=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
A__ = vars(chkpt["args"]["model"] )
A__ = args["source_lang"]
A__ = args["target_lang"]
A__ = dirname(SCREAMING_SNAKE_CASE_ )
A__ = basename(SCREAMING_SNAKE_CASE_ )
# dicts
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , F'dict.{src_lang}.txt' )
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , F'dict.{tgt_lang}.txt' )
A__ = Dictionary.load(SCREAMING_SNAKE_CASE_ )
A__ = rewrite_dict_keys(src_dict.indices )
A__ = len(SCREAMING_SNAKE_CASE_ )
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , "vocab-src.json" )
print(F'Generating {src_vocab_file} with {src_vocab_size} {src_lang} records' )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ , indent=SCREAMING_SNAKE_CASE_ ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
A__ = True
for k in src_vocab.keys():
if not k.islower():
A__ = False
break
A__ = Dictionary.load(SCREAMING_SNAKE_CASE_ )
A__ = rewrite_dict_keys(tgt_dict.indices )
A__ = len(SCREAMING_SNAKE_CASE_ )
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , "vocab-tgt.json" )
print(F'Generating {tgt_vocab_file} with {tgt_vocab_size} {tgt_lang} records' )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ , indent=SCREAMING_SNAKE_CASE_ ) )
# merges_file (bpecodes)
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , VOCAB_FILES_NAMES["merges_file"] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if os.path.exists(SCREAMING_SNAKE_CASE_ ):
break
with open(SCREAMING_SNAKE_CASE_ , encoding="utf-8" ) as fin:
A__ = fin.read()
A__ = re.sub(R" \d+$" , "" , SCREAMING_SNAKE_CASE_ , 0 , re.M ) # remove frequency number
print(F'Generating {merges_file}' )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as fout:
fout.write(SCREAMING_SNAKE_CASE_ )
# model config
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , "config.json" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'need to extend tokenizer to support bpe={args["bpe"]}'
assert args["tokenizer"] == "moses", F'need to extend tokenizer to support bpe={args["tokenizer"]}'
A__ = {
"architectures": ["FSMTForConditionalGeneration"],
"model_type": "fsmt",
"activation_dropout": args["activation_dropout"],
"activation_function": "relu",
"attention_dropout": args["attention_dropout"],
"d_model": args["decoder_embed_dim"],
"dropout": args["dropout"],
"init_std": 0.02,
"max_position_embeddings": args["max_source_positions"],
"num_hidden_layers": args["encoder_layers"],
"src_vocab_size": src_vocab_size,
"tgt_vocab_size": tgt_vocab_size,
"langs": [src_lang, tgt_lang],
"encoder_attention_heads": args["encoder_attention_heads"],
"encoder_ffn_dim": args["encoder_ffn_embed_dim"],
"encoder_layerdrop": args["encoder_layerdrop"],
"encoder_layers": args["encoder_layers"],
"decoder_attention_heads": args["decoder_attention_heads"],
"decoder_ffn_dim": args["decoder_ffn_embed_dim"],
"decoder_layerdrop": args["decoder_layerdrop"],
"decoder_layers": args["decoder_layers"],
"bos_token_id": 0,
"pad_token_id": 1,
"eos_token_id": 2,
"is_encoder_decoder": True,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_all_embeddings"],
}
# good hparam defaults to start with
A__ = 5
A__ = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
A__ = best_score_hparams[model_dir]["length_penalty"]
else:
A__ = 1.0
print(F'Generating {fsmt_model_config_file}' )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ , indent=SCREAMING_SNAKE_CASE_ ) )
# tokenizer config
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A__ = {
"langs": [src_lang, tgt_lang],
"model_max_length": 1_0_2_4,
"do_lower_case": do_lower_case,
}
print(F'Generating {fsmt_tokenizer_config_file}' )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ , indent=SCREAMING_SNAKE_CASE_ ) )
# model
A__ = chkpt["models"][0]
A__ = model.state_dict()
# rename keys to start with 'model.'
A__ = OrderedDict(("model." + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
A__ = [
"model.model",
"model.encoder.version",
"model.decoder.version",
"model.encoder_embed_tokens.weight",
"model.decoder_embed_tokens.weight",
"model.encoder.embed_positions._float_tensor",
"model.decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
model_state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A__ = FSMTConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
A__ = FSMTForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
# check that it loads ok
model_new.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
# save
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print("Conversion is done!" )
print("\nLast step is to upload the files to s3" )
print(F'cd {data_root}' )
print(F'transformers-cli upload {model_dir}' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCAmelCase__ = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 626 |
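The converter above is normally driven through its argparse entry point, but the function can also be called directly; a hedged example (the checkpoint path is illustrative, and the name matches the call in the __main__ block):
# assumes the fairseq dump dir holds model4.pt, dict.{src,tgt}.txt and bpecodes
convert_fsmt_checkpoint_to_pytorch(
    "data/wmt19.ru-en.ensemble/model4.pt",
    "dumped/wmt19-ru-en",
)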
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int ) -> int:
'''simple docstring'''
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
A__ = 1
A__ = 1
while repunit:
A__ = (1_0 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int = 1_0_0_0_0_0_0 ) -> int:
'''simple docstring'''
A__ = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(SCREAMING_SNAKE_CASE_ ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(f"""{solution() = }""")
| 626 | 1 |
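A quick sanity check on the helper (least_divisible_repunit, the name used inside solution above): the smallest repunit divisible by 7 is R(6) = 111111, and any divisor sharing a factor with 10 can never divide a repunit, hence the early return of 0:
assert least_divisible_repunit(7) == 6    # 111111 == 7 * 15873
assert least_divisible_repunit(41) == 5   # 11111 == 41 * 271
assert least_divisible_repunit(10) == 0   # shares a factor with 10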
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: list ) -> list:
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE_ ) < 2:
return collection
def circle_sort_util(SCREAMING_SNAKE_CASE_: list , SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: int ) -> bool:
A__ = False
if low == high:
return swapped
A__ = low
A__ = high
while left < right:
if collection[left] > collection[right]:
A__ , A__ = (
collection[right],
collection[left],
)
A__ = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
A__ , A__ = (
collection[right + 1],
collection[left],
)
A__ = True
A__ = low + int((high - low) / 2 )
A__ = circle_sort_util(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A__ = circle_sort_util(SCREAMING_SNAKE_CASE_ , mid + 1 , SCREAMING_SNAKE_CASE_ )
return swapped or left_swap or right_swap
A__ = True
while is_not_sorted is True:
A__ = circle_sort_util(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_ ) - 1 )
return collection
if __name__ == "__main__":
lowerCAmelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(""",""")]
print(circle_sort(unsorted))
| 626 |
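A few illustrative cases for the sorter (circle_sort, per the __main__ block above); the recursion sorts both halves and the outer loop repeats until a full pass makes no swap:
assert circle_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert circle_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert circle_sort([]) == []
assert circle_sort([-2, 5, 0, -45]) == [-45, -2, 0, 5]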
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class a__ :
"""simple docstring"""
def __init__( self , lowercase ) -> int:
'''simple docstring'''
A__ = data
A__ = None
class a__ :
"""simple docstring"""
def __init__( self ) -> List[Any]:
'''simple docstring'''
A__ = None
A__ = None
def __iter__( self ) -> Iterator[Any]:
'''simple docstring'''
A__ = self.head
while self.head:
yield node.data
A__ = node.next
if node == self.head:
break
def __len__( self ) -> int:
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self ) -> List[Any]:
'''simple docstring'''
return "->".join(str(lowercase ) for item in iter(self ) )
def UpperCamelCase ( self , lowercase ) -> None:
'''simple docstring'''
self.insert_nth(len(self ) , lowercase )
def UpperCamelCase ( self , lowercase ) -> None:
'''simple docstring'''
self.insert_nth(0 , lowercase )
def UpperCamelCase ( self , lowercase , lowercase ) -> None:
'''simple docstring'''
if index < 0 or index > len(self ):
raise IndexError("list index out of range." )
A__ = Node(lowercase )
if self.head is None:
A__ = new_node # first node points itself
A__ = A__ = new_node
elif index == 0: # insert at head
A__ = self.head
A__ = A__ = new_node
else:
A__ = self.head
for _ in range(index - 1 ):
A__ = temp.next
A__ = temp.next
A__ = new_node
if index == len(self ) - 1: # insert at tail
A__ = new_node
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
return self.delete_nth(0 )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def UpperCamelCase ( self , lowercase = 0 ) -> Any:
'''simple docstring'''
if not 0 <= index < len(self ):
raise IndexError("list index out of range." )
A__ = self.head
if self.head == self.tail: # just one node
A__ = A__ = None
elif index == 0: # delete head node
A__ = self.tail.next.next
A__ = self.head.next
else:
A__ = self.head
for _ in range(index - 1 ):
A__ = temp.next
A__ = temp.next
A__ = temp.next.next
if index == len(self ) - 1: # delete at tail
A__ = temp
return delete_node.data
def UpperCamelCase ( self ) -> bool:
'''simple docstring'''
return len(self ) == 0
def lowerCAmelCase__ ( ) -> None:
'''simple docstring'''
A__ = CircularLinkedList()
assert len(SCREAMING_SNAKE_CASE_ ) == 0
assert circular_linked_list.is_empty() is True
assert str(SCREAMING_SNAKE_CASE_ ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(SCREAMING_SNAKE_CASE_ ) == i
circular_linked_list.insert_nth(SCREAMING_SNAKE_CASE_ , i + 1 )
assert str(SCREAMING_SNAKE_CASE_ ) == "->".join(str(SCREAMING_SNAKE_CASE_ ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(SCREAMING_SNAKE_CASE_ ) == "->".join(str(SCREAMING_SNAKE_CASE_ ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(SCREAMING_SNAKE_CASE_ ) == "->".join(str(SCREAMING_SNAKE_CASE_ ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(SCREAMING_SNAKE_CASE_ ) == "->".join(str(SCREAMING_SNAKE_CASE_ ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(SCREAMING_SNAKE_CASE_ ) == "->".join(str(SCREAMING_SNAKE_CASE_ ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 626 | 1 |
from __future__ import annotations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: int ) -> list[str]:
'''simple docstring'''
if partitions <= 0:
raise ValueError("partitions must be a positive number!" )
if partitions > number_of_bytes:
raise ValueError("partitions can not > number_of_bytes!" )
A__ = number_of_bytes // partitions
A__ = []
for i in range(SCREAMING_SNAKE_CASE_ ):
A__ = i * bytes_per_partition + 1
A__ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(F'{start_bytes}-{end_bytes}' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 626 |
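For example (using allocation_num as an illustrative stand-in name for the function above), 100 bytes over 4 partitions split evenly, while a remainder is absorbed by the final range:
assert allocation_num(100, 4) == ["1-25", "26-50", "51-75", "76-100"]
assert allocation_num(10, 3) == ["1-3", "4-6", "7-10"]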
import math
lowerCAmelCase__ = 1_0
lowerCAmelCase__ = 7
lowerCAmelCase__ = BALLS_PER_COLOUR * NUM_COLOURS
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int = 2_0 ) -> str:
'''simple docstring'''
A__ = math.comb(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A__ = math.comb(NUM_BALLS - BALLS_PER_COLOUR , SCREAMING_SNAKE_CASE_ )
A__ = NUM_COLOURS * (1 - missing_colour / total)
return F'{result:.9f}'
if __name__ == "__main__":
print(solution(2_0))
| 626 | 1 |
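The closed form above is E[#colours] = 7 * (1 - C(60,20)/C(70,20)), i.e. seven times the probability that a given colour shows up in the 20-ball draw. A hedged Monte Carlo cross-check (approximate by nature, function name is invented):
import random
def simulate(trials: int = 200_000) -> float:
    urn = [colour for colour in range(7) for _ in range(10)]  # 7 colours, 10 balls each
    hits = sum(len(set(random.sample(urn, 20))) for _ in range(trials))
    return hits / trials
print(simulate())  # should land near the value printed by solution(20), about 6.82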
class a__ ( snake_case ):
"""simple docstring"""
pass
class a__ ( snake_case ):
"""simple docstring"""
pass
class a__ :
"""simple docstring"""
def __init__( self ) -> Optional[Any]:
'''simple docstring'''
A__ = [
[],
[],
[],
]
def UpperCamelCase ( self , lowercase , lowercase ) -> None:
'''simple docstring'''
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError("Maximum queue size is 100" )
self.queues[priority].append(lowercase )
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2" )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("All queues are empty" )
def __str__( self ) -> str:
'''simple docstring'''
return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class a__ :
"""simple docstring"""
def __init__( self ) -> Optional[int]:
'''simple docstring'''
A__ = []
def UpperCamelCase ( self , lowercase ) -> None:
'''simple docstring'''
if len(self.queue ) == 100:
raise OverFlowError("Maximum queue size is 100" )
self.queue.append(lowercase )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
if not self.queue:
raise UnderFlowError("The queue is empty" )
else:
A__ = min(self.queue )
self.queue.remove(lowercase )
return data
def __str__( self ) -> str:
'''simple docstring'''
return str(self.queue )
def lowerCAmelCase__ ( ) -> str:
'''simple docstring'''
A__ = FixedPriorityQueue()
fpq.enqueue(0 , 1_0 )
fpq.enqueue(1 , 7_0 )
fpq.enqueue(0 , 1_0_0 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 6_4 )
fpq.enqueue(0 , 1_2_8 )
print(SCREAMING_SNAKE_CASE_ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(SCREAMING_SNAKE_CASE_ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def lowerCAmelCase__ ( ) -> Any:
'''simple docstring'''
A__ = ElementPriorityQueue()
epq.enqueue(1_0 )
epq.enqueue(7_0 )
epq.enqueue(1_0_0 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(6_4 )
epq.enqueue(1_2_8 )
print(SCREAMING_SNAKE_CASE_ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(SCREAMING_SNAKE_CASE_ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 626 |
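The element-priority queue above pays O(n) per dequeue for the min/remove scan; the standard-library equivalent is a binary heap at O(log n) per operation. A minimal comparison sketch:
import heapq
heap: list[int] = []
for value in (10, 70, 100, 1, 5, 7, 4, 64, 128):
    heapq.heappush(heap, value)
print([heapq.heappop(heap) for _ in range(3)])  # [1, 4, 5]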
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: str = "cpu" , SCREAMING_SNAKE_CASE_: Union[str, None] = None ) -> None:
'''simple docstring'''
A__ = torch.load(SCREAMING_SNAKE_CASE_ , map_location=SCREAMING_SNAKE_CASE_ )
for k, v in tqdm(state_dict.items() ):
if not isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
A__ = v.half()
if save_path is None: # overwrite src_path
A__ = src_path
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
fire.Fire(convert)
| 626 | 1 |
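Because the script hands the function to fire.Fire, CLI flags map one-to-one onto its parameters; an illustrative direct call (paths are placeholders):
# halves every tensor in the state dict and writes the result alongside the original
convert("pytorch_model.bin", map_location="cpu", save_path="pytorch_model.fp16.bin")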
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> str:
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 626 |
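Two illustrative cases (reverse_words is a stand-in name for the helper above); note that split() also collapses repeated whitespace:
assert reverse_words("I love Python") == "Python love I"
assert reverse_words("  spaced   out  ") == "out spaced"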
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = field(
metadata={'help': 'The output directory where the model will be written.'} , )
__lowerCamelCase = field(
metadata={
'help': (
'The encoder model checkpoint for weights initialization.'
'Don\'t set if you want to train an encoder model from scratch.'
)
} , )
__lowerCamelCase = field(
metadata={
'help': (
'The decoder model checkpoint for weights initialization.'
'Don\'t set if you want to train a decoder model from scratch.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Pretrained encoder config name or path if not the same as encoder_model_name'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Pretrained decoder config name or path if not the same as decoder_model_name'} )
def lowerCAmelCase__ ( ) -> List[Any]:
'''simple docstring'''
A__ = HfArgumentParser((ModelArguments,) )
((A__) , ) = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
A__ = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
A__ = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
A__ = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
A__ = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
A__ = True
A__ = True
A__ = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=SCREAMING_SNAKE_CASE_ , decoder_config=SCREAMING_SNAKE_CASE_ , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
A__ = decoder_config.decoder_start_token_id
A__ = decoder_config.pad_token_id
if decoder_start_token_id is None:
A__ = decoder_config.bos_token_id
if pad_token_id is None:
A__ = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
A__ = decoder_config.eos_token_id
A__ = decoder_start_token_id
A__ = pad_token_id
A__ = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
A__ = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
A__ = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
| 626 | 1 |
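An illustrative run of the script above, pairing a ViT encoder with a GPT-2 decoder; since HfArgumentParser reads sys.argv, the sketch below sets it explicitly (checkpoint names are examples, not requirements):
import sys
sys.argv = [
    "create_model.py",
    "--output_dir", "vit-gpt2",
    "--encoder_model_name_or_path", "google/vit-base-patch16-224-in21k",
    "--decoder_model_name_or_path", "gpt2",
]
main()  # writes model, image processor and tokenizer into ./vit-gpt2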
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""ViTFeatureExtractor"""]
lowerCAmelCase__ = ["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 626 |
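The point of the _LazyModule indirection above is that importing the package never eagerly pulls in torch, TF or Flax; submodules are resolved only on first attribute access. A stripped-down sketch of the same pattern (not the real implementation):
import importlib
import types
class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._locations = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
    def __getattr__(self, attr: str):
        module = importlib.import_module("." + self._locations[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache, so __getattr__ fires only once per name
        return value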
import collections
import importlib.util
import os
import re
from pathlib import Path
lowerCAmelCase__ = """src/transformers"""
# Matches is_xxx_available()
lowerCAmelCase__ = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase__ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase__ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCAmelCase__ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase__ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase__ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase__ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase__ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase__ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCAmelCase__ = re.compile(R"""^\s*try:""")
# Catches a line with else:
lowerCAmelCase__ = re.compile(R"""^\s*else:""")
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Any ) -> int:
'''simple docstring'''
if _re_test_backend.search(SCREAMING_SNAKE_CASE_ ) is None:
return None
A__ = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE_ )]
backends.sort()
return "_and_".join(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f:
A__ = f.readlines()
A__ = 0
while line_index < len(SCREAMING_SNAKE_CASE_ ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(SCREAMING_SNAKE_CASE_ ):
return None
# First grab the objects without a specific backend in _import_structure
A__ = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
A__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ):
A__ = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ).groups()[0]
A__ = re.findall("\[([^\]]+)\]" , SCREAMING_SNAKE_CASE_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
A__ = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE_ )
if single_line_import_search is not None:
A__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(SCREAMING_SNAKE_CASE_ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE_ )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
A__ = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
A__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
A__ = lines[line_index]
if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ) is not None:
objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ).groups()[0] )
elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ) is not None:
A__ = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(", " )
A__ = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE_ )
elif _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ) is not None:
A__ = _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(", " )
A__ = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE_ )
elif _re_quote_object.search(SCREAMING_SNAKE_CASE_ ) is not None:
objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE_ ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 1_2 + "\"" ):
objects.append(line[1_3:-3] )
line_index += 1
A__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
A__ = []
while (
line_index < len(SCREAMING_SNAKE_CASE_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
A__ = lines[line_index]
A__ = _re_import.search(SCREAMING_SNAKE_CASE_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
A__ = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(SCREAMING_SNAKE_CASE_ ):
# If the line is an if is_backend_available, we grab all objects associated.
A__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
A__ = lines[line_index]
A__ = _re_import.search(SCREAMING_SNAKE_CASE_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 1_2 ):
objects.append(line[1_2:-2] )
line_index += 1
A__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict , SCREAMING_SNAKE_CASE_: List[Any] ) -> Optional[int]:
'''simple docstring'''
def find_duplicates(SCREAMING_SNAKE_CASE_: str ):
return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
A__ = []
for key in import_dict_objects.keys():
A__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
A__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
A__ = "base imports" if key == "none" else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def lowerCAmelCase__ ( ) -> Dict:
'''simple docstring'''
A__ = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE_ ):
if "__init__.py" in files:
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , "__init__.py" )
A__ = parse_init(SCREAMING_SNAKE_CASE_ )
if objects is not None:
A__ = analyze_results(*SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
A__ = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append("\n".join(SCREAMING_SNAKE_CASE_ ) )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
raise ValueError("\n\n".join(SCREAMING_SNAKE_CASE_ ) )
def lowerCAmelCase__ ( ) -> Optional[Any]:
'''simple docstring'''
A__ = []
for path, directories, files in os.walk(SCREAMING_SNAKE_CASE_ ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(SCREAMING_SNAKE_CASE_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(SCREAMING_SNAKE_CASE_ ) / folder).glob("*.py" ) ) ) == 0:
continue
A__ = str((Path(SCREAMING_SNAKE_CASE_ ) / folder).relative_to(SCREAMING_SNAKE_CASE_ ) )
A__ = short_path.replace(os.path.sep , "." )
submodules.append(SCREAMING_SNAKE_CASE_ )
for fname in files:
if fname == "__init__.py":
continue
A__ = str((Path(SCREAMING_SNAKE_CASE_ ) / fname).relative_to(SCREAMING_SNAKE_CASE_ ) )
A__ = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(SCREAMING_SNAKE_CASE_ )
return submodules
lowerCAmelCase__ = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def lowerCAmelCase__ ( ) -> Optional[int]:
'''simple docstring'''
A__ = importlib.util.spec_from_file_location(
"transformers" , os.path.join(SCREAMING_SNAKE_CASE_ , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
A__ = spec.loader.load_module()
A__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(SCREAMING_SNAKE_CASE_ ) > 0:
A__ = "\n".join(F'- {module}' for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
F'{list_of_modules}\n'
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 626 | 1 |
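Concretely, the checker enforces that the two halves of every init stay in sync: whatever _import_structure registers must reappear under TYPE_CHECKING, per backend, and vice versa. A minimal init that passes both parsers (module and object names are invented for illustration):
from typing import TYPE_CHECKING
from transformers.utils import _LazyModule
_import_structure = {"modeling_foo": ["FooConfig", "FooModel"]}
if TYPE_CHECKING:
    from .modeling_foo import FooConfig, FooModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)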
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
lowerCAmelCase__ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
lowerCAmelCase__ = re.compile(R"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
lowerCAmelCase__ = re.compile(R"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
lowerCAmelCase__ = re.compile(R"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
lowerCAmelCase__ = [
("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""),
("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""),
("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""),
("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""),
("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""),
("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""),
("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""),
("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""),
("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForObjectDetection"""),
(
"""zero-shot-object-detection""",
"""MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""",
"""AutoModelForZeroShotObjectDetection""",
),
("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""),
("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""),
("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""),
("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""),
(
"""table-question-answering""",
"""MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForTableQuestionAnswering""",
),
("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""),
("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""),
(
"""next-sentence-prediction""",
"""MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""",
"""AutoModelForNextSentencePrediction""",
),
(
"""audio-frame-classification""",
"""MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForAudioFrameClassification""",
),
("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""),
(
"""document-question-answering""",
"""MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForDocumentQuestionAnswering""",
),
(
"""visual-question-answering""",
"""MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForVisualQuestionAnswering""",
),
("""image-to-text""", """MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""),
(
"""zero-shot-image-classification""",
"""MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForZeroShotImageClassification""",
),
("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", """AutoModelForDepthEstimation"""),
("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""),
("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""),
]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple ) -> Dict:
'''simple docstring'''
A__ = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" , SCREAMING_SNAKE_CASE_ )
return [m.group(0 ) for m in matches]
def lowerCAmelCase__ ( ) -> Optional[int]:
'''simple docstring'''
A__ = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
A__ = {
config.replace("Config" , "" ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
A__ = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
A__ = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
A__ = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(SCREAMING_SNAKE_CASE_ ):
A__ = None
if _re_tf_models.match(SCREAMING_SNAKE_CASE_ ) is not None:
A__ = tf_models
A__ = _re_tf_models.match(SCREAMING_SNAKE_CASE_ ).groups()[0]
elif _re_flax_models.match(SCREAMING_SNAKE_CASE_ ) is not None:
A__ = flax_models
A__ = _re_flax_models.match(SCREAMING_SNAKE_CASE_ ).groups()[0]
elif _re_pt_models.match(SCREAMING_SNAKE_CASE_ ) is not None:
A__ = pt_models
A__ = _re_pt_models.match(SCREAMING_SNAKE_CASE_ ).groups()[0]
if lookup_dict is not None:
while len(SCREAMING_SNAKE_CASE_ ) > 0:
if attr_name in model_prefix_to_model_type:
A__ = True
break
# Try again after removing the last word in the name
A__ = "".join(camel_case_split(SCREAMING_SNAKE_CASE_ )[:-1] )
A__ = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
A__ = list(SCREAMING_SNAKE_CASE_ )
all_models.sort()
A__ = {"model_type": all_models}
A__ = [pt_models[t] for t in all_models]
A__ = [tf_models[t] for t in all_models]
A__ = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
A__ = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
A__ = "AutoProcessor"
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
A__ = "AutoTokenizer"
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
A__ = "AutoFeatureExtractor"
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
A__ = "AutoTokenizer"
A__ = [processors[t] for t in all_models]
return pd.DataFrame(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int ) -> Tuple:
'''simple docstring'''
A__ = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
A__ = [model_mapping, F'TF_{model_mapping}', F'FLAX_{model_mapping}']
A__ = [auto_class, F'TF{auto_class}', F'Flax{auto_class}']
# Loop through all three frameworks
for module, cls, mapping in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
# The type of pipeline may not exist in this framework
if not hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
continue
# First extract all model_names
A__ = []
for name in getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).values():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
model_names.append(SCREAMING_SNAKE_CASE_ )
else:
model_names.extend(list(SCREAMING_SNAKE_CASE_ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict , SCREAMING_SNAKE_CASE_: int ) -> int:
'''simple docstring'''
A__ = get_frameworks_table()
A__ = Dataset.from_pandas(SCREAMING_SNAKE_CASE_ )
A__ = hf_hub_download(
"huggingface/transformers-metadata" , "pipeline_tags.json" , repo_type="dataset" , token=SCREAMING_SNAKE_CASE_ )
A__ = Dataset.from_json(SCREAMING_SNAKE_CASE_ )
A__ = {
tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
for i in range(len(SCREAMING_SNAKE_CASE_ ) )
}
A__ = update_pipeline_and_auto_class_table(SCREAMING_SNAKE_CASE_ )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
A__ = sorted(table.keys() )
A__ = pd.DataFrame(
{
"model_class": model_classes,
"pipeline_tag": [table[m][0] for m in model_classes],
"auto_class": [table[m][1] for m in model_classes],
} )
A__ = Dataset.from_pandas(SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(SCREAMING_SNAKE_CASE_ , "frameworks.json" ) )
tags_dataset.to_json(os.path.join(SCREAMING_SNAKE_CASE_ , "pipeline_tags.json" ) )
if commit_sha is not None:
A__ = (
F'Update with commit {commit_sha}\n\nSee: '
F'https://github.com/huggingface/transformers/commit/{commit_sha}'
)
else:
A__ = "Update"
upload_folder(
repo_id="huggingface/transformers-metadata" , folder_path=SCREAMING_SNAKE_CASE_ , repo_type="dataset" , token=SCREAMING_SNAKE_CASE_ , commit_message=SCREAMING_SNAKE_CASE_ , )
def lowerCAmelCase__ ( ) -> int:
'''simple docstring'''
A__ = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
A__ = transformers_module.pipelines.SUPPORTED_TASKS
A__ = []
for key in pipeline_tasks:
if key not in in_table:
A__ = pipeline_tasks[key]["pt"]
if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ):
A__ = model[0]
A__ = model.__name__
if model not in in_table.values():
missing.append(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
A__ = ", ".join(SCREAMING_SNAKE_CASE_ )
raise ValueError(
"The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
F'`utils/update_metadata.py`: {msg}. Please add them!' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--token""", type=str, help="""The token to use to push to the transformers-metadata dataset.""")
parser.add_argument("""--commit_sha""", type=str, help="""The sha of the commit going with this update.""")
parser.add_argument("""--check-only""", action="""store_true""", help="""Activate to just check all pipelines are present.""")
lowerCAmelCase__ = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 626 |
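The two entry points above can also be exercised without the CLI; illustrative positional calls matching the __main__ block (the token is a placeholder, never a real secret):
# validate that every pipeline task has a row, without touching the Hub
check_pipeline_tags()
# rebuild frameworks.json / pipeline_tags.json and push to huggingface/transformers-metadata
update_metadata("hf_xxx", None)  # (token, commit_sha)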
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class a__ ( snake_case ):
"""simple docstring"""
def __init__( self , *lowercase , **lowercase ) -> None:
'''simple docstring'''
warnings.warn(
"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use CLIPImageProcessor instead." , lowercase , )
super().__init__(*lowercase , **lowercase )
| 626 | 1 |
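Since the shim subclasses CLIPImageProcessor without overriding anything, migration is a pure rename (the class names below are the ones the warning text refers to); both loads yield equivalent processors, but only the first emits the warning:
from transformers import CLIPFeatureExtractor, CLIPImageProcessor
legacy = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")   # warns
current = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")    # preferred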
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> Dict:
'''simple docstring'''
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: dict[int, list[int]] ) -> list[tuple[int, int]]:
'''simple docstring'''
A__ = 0
A__ = len(SCREAMING_SNAKE_CASE_ ) # No of vertices in graph
A__ = [0] * n
A__ = [False] * n
def dfs(SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: List[Any] ):
A__ = True
A__ = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , id_ )
A__ = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
A__ = min(low[at] , low[to] )
A__ = []
for i in range(SCREAMING_SNAKE_CASE_ ):
if not visited[i]:
dfs(SCREAMING_SNAKE_CASE_ , -1 , SCREAMING_SNAKE_CASE_ , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 626 |
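Two quick checks against the demo graphs (compute_bridges / get_demo_graph are illustrative stand-ins for the two functions above): graph 3 is a dense 5-node graph in which every edge lies on a cycle, while graph 1 is a forest, so every one of its edges is a bridge:
assert compute_bridges(get_demo_graph(3)) == []
assert (0, 6) in compute_bridges(get_demo_graph(1))  # cutting 0-6 disconnects the tree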
from __future__ import annotations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: list[str] | None = None ) -> list[list[str]]:
'''simple docstring'''
A__ = word_bank or []
# create a table
A__ = len(SCREAMING_SNAKE_CASE_ ) + 1
A__ = []
for _ in range(SCREAMING_SNAKE_CASE_ ):
table.append([] )
# seed value
A__ = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(SCREAMING_SNAKE_CASE_ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(SCREAMING_SNAKE_CASE_ )] == word:
A__ = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now, push that combination to the table[i+len(word)]
table[i + len(SCREAMING_SNAKE_CASE_ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(SCREAMING_SNAKE_CASE_ )]:
combination.reverse()
return table[len(SCREAMING_SNAKE_CASE_ )]
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
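    # Hedged sketch (added for illustration): a target that cannot be built
    # from the word bank yields an empty list of combinations.
    print(all_construct("abc", ["z"]))  # expected: []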
| 626 | 1 |
def apply_table(inp: str, table: list[int]) -> str:
    """Apply a permutation table to a bit-string (tables are 1-indexed)."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data: str) -> str:
    """Rotate a bit-string one position to the left."""
    return data[1:] + data[0]


def xor(a: str, b: str) -> str:
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s: list[list[int]], data: str) -> str:
    """Look up a 4-bit block in S-box `s`: outer bits pick the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion: list[int], sa: list[list[int]], sb: list[list[int]], key: str, message: str) -> str:
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(sa, temp[:4])  # noqa: E741
    r = apply_sbox(sb, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 626 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily merge consecutive (source, target) pairs while both stay under max_tokens."""
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    """Pack the train split of a seq2seq data dir and copy val/test through unchanged."""
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
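
# Hedged usage sketch (illustrative paths and names, not from the original file):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --max_seq_len 1024 --data_dir ./cnn_dm --save_path ./cnn_dm_packed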
| 626 | 1 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("""3.6.4"""):
from nltk import word_tokenize
lowerCAmelCase__ = """\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
"""
lowerCAmelCase__ = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""
lowerCAmelCase__ = """
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
'meteor': meteor score.
Examples:
>>> meteor = datasets.load_metric('meteor')
>>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]
>>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results[\"meteor\"], 4))
0.6944
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
    def _download_and_prepare(self, dl_manager):
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 626 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory building a ConvertCommand from the parsed CLI arguments."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowercase )
if "ckpt" in self._tf_checkpoint.lower():
A__ = self._tf_checkpoint
A__ = ""
else:
A__ = self._tf_checkpoint
A__ = ""
convert_transfo_xl_checkpoint_to_pytorch(
lowercase , self._config , self._pytorch_dump_output , lowercase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowercase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowercase )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 626 | 1 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no Transformers counterpart."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free linear layer that shares weights with an embedding."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    """Rename fairseq MoE parameter names to the Transformers NLLB-MoE layout."""
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # fixed: the source's `if "fc2" and "experts" not in key` only tested the
        # second clause; behavior is unchanged since str.replace is a no-op when
        # ".fc2." is absent, but the intent is now explicit
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    """Convert per-rank fairseq expert checkpoints into sharded Transformers weights plus an index."""
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    # the assignment target was anonymized in the source; "shared.weight" is the
    # tied-embedding key this kind of conversion script is expected to fill
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ , lowerCAmelCase__ = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_2_8,
args.dtype,
)
lowerCAmelCase__ = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8
)
config.save_pretrained(args.pytorch_dump_folder_path)
lowerCAmelCase__ = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
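
# Hedged note (added for illustration): the dump folder produced above holds
# one ".bin" shard per expert rank plus a final shared shard, and a JSON index
# whose "weight_map" sends every parameter name to its shard file, which is
# the sharded-checkpoint layout that from_pretrained consumes.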
| 626 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after the processor's
        aspect-ratio-preserving shortest-edge resize."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
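    # Worked example (added for illustration): a 30x60 (h x w) input with
    # shortest_edge=18 resizes to height 18 and width 36; the short side is
    # pinned to 18 and the aspect ratio preserved, matching the branches above.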
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_iscrowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_iscrowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_iscrowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_iscrowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 626 | 1 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Treat the curve as a collection of linear segments and sum the areas of
    the trapeziums they form with the x-axis."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
while i <= 1_0_0_0_0_0:
print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 1_0
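    # Hedged check (added for illustration): because of the abs() in the
    # helper, the routine estimates the unsigned area between the curve and
    # the x-axis. Splitting at the root x = -1, the exact value is
    # 3752/12, about 312.67, which the printed estimates approach as the
    # step count grows.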
| 626 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 626 | 1 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/blenderbot_small-90M""": 5_1_2,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast tokenizer for BlenderbotSmall, backed by a byte-level BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
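
    # Hedged note (added for illustration): build_inputs_with_special_tokens
    # wraps a single sequence as <bos> tokens <eos> and joins a pair with an
    # extra <eos> separator; token type ids are all zeros, as the method above
    # shows.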
| 626 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    # the two boolean flags below were anonymized in the source; these are the
    # usual PipelineTesterMixin switch names for the behavior tested here
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()
        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
| 626 | 1 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
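
    # Hedged note (added for illustration, not from the source): with
    # "fixed_small_log" the per-step variance comes from a fixed, log-clamped
    # schedule, while "learned_range" interpolates between the minimum and
    # maximum log-variance using the model-predicted fraction exercised in
    # test_variance_learned_range above.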
| 626 |
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Place one queen per row with DFS, pruning column and diagonal collisions."""
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row equals the size of the board, there is a queen in every row of the
    # current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results
    for col in range(n):
        # First we check that the current board (possible_board) does not already
        # contain this column value, because that would mean a vertical collision.
        # Then we apply the two diagonal formulas:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist
        # in their respective collision lists
        # (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # Otherwise we recurse with the updated inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
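    # Known solution counts for small boards, as a quick sanity check:
    # n = 4 -> 2, n = 5 -> 10, n = 6 -> 4, n = 7 -> 40, n = 8 -> 92.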
| 626 | 1 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Save the best model according to the validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
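
# Hedged usage sketch (added for illustration): wiring the callbacks above into
# a pl.Trainer; `output_dir` and `val_metric` are assumed caller-side values.
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(output_dir, val_metric),
#           get_early_stopping_callback(val_metric, patience=3),
#       ]
#   )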
| 626 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
    from transformers import (
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTableQuestionAnswering,
        TFAutoModelForTokenClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFFunnelBaseModel,
        TFFunnelModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
        TFTapasForQuestionAnswering,
    )
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class a__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
A__ = "bert-base-cased"
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
A__ = "bert-base-cased"
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelForPreTraining.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelForCausalLM.from_pretrained(lowercase )
A__ , A__ = TFAutoModelForCausalLM.from_pretrained(lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelWithLMHead.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelForMaskedLM.from_pretrained(lowercase )
A__ , A__ = TFAutoModelForMaskedLM.from_pretrained(lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase )
A__ , A__ = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelForSequenceClassification.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelForQuestionAnswering.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
@require_tensorflow_probability
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelForTableQuestionAnswering.from_pretrained(lowercase )
A__ , A__ = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = TFAutoModelWithLMHead.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = TFAutoModelWithLMHead.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(lowercase , lowercase )
A__ = copy.deepcopy(model.config )
A__ = ["FunnelBaseModel"]
A__ = TFAutoModel.from_config(lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase )
A__ = TFAutoModel.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
try:
AutoConfig.register("new-model" , lowercase )
A__ = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(lowercase ):
auto_class.register(lowercase , lowercase )
auto_class.register(lowercase , lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
auto_class.register(lowercase , lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
A__ = BertModelTester(self ).get_config()
A__ = NewModelConfig(**tiny_config.to_dict() )
A__ = auto_class.from_config(lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase )
A__ = auto_class.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
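# Hedged sketch (not part of the original test): the registration flow exercised
# above, with illustrative names. `NewModelConfig` appears in the test; `TFNewModel`
# is an assumed stand-in for a custom model class.
#
# AutoConfig.register("new-model", NewModelConfig)
# TFAutoModel.register(NewModelConfig, TFNewModel)
# model = TFAutoModel.from_config(NewModelConfig())  # now resolvable through the auto-API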
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase , "bert-base is not a local folder and is not a valid model identifier" ):
A__ = TFAutoModel.from_pretrained("bert-base" )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
A__ = TFAutoModel.from_pretrained(lowercase , revision="aaaaaa" )
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
A__ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaisesRegex(lowercase , "Use `from_pt=True` to load this model" ):
A__ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
A__ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
A__ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
A__ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 626 | 1 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(snake_case )
class a__ ( snake_case ):
"""simple docstring"""
def __init__( self , **lowercase ) -> Optional[int]:
'''simple docstring'''
super().__init__(**lowercase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , lowercase , **lowercase ) -> str:
'''simple docstring'''
return super().__call__(lowercase , **lowercase )
def UpperCamelCase ( self , **lowercase ) -> Optional[int]:
'''simple docstring'''
A__ = {}
if "candidate_labels" in kwargs:
A__ = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
A__ = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def UpperCamelCase ( self , lowercase , lowercase=None , lowercase="This is a photo of {}." ) -> int:
'''simple docstring'''
A__ = load_image(lowercase )
A__ = self.image_processor(images=[image] , return_tensors=self.framework )
A__ = candidate_labels
A__ = [hypothesis_template.format(lowercase ) for x in candidate_labels]
A__ = self.tokenizer(lowercase , return_tensors=self.framework , padding=lowercase )
A__ = [text_inputs]
return inputs
def UpperCamelCase ( self , lowercase ) -> Tuple:
'''simple docstring'''
A__ = model_inputs.pop("candidate_labels" )
A__ = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , lowercase ):
A__ = text_inputs[0]
else:
# Batching case.
A__ = text_inputs[0][0]
A__ = self.model(**lowercase , **lowercase )
A__ = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_image,
}
return model_outputs
def UpperCamelCase ( self , lowercase ) -> Dict:
'''simple docstring'''
A__ = model_outputs.pop("candidate_labels" )
A__ = model_outputs["logits"][0]
if self.framework == "pt":
A__ = logits.softmax(dim=-1 ).squeeze(-1 )
A__ = probs.tolist()
if not isinstance(lowercase , lowercase ):
A__ = [scores]
elif self.framework == "tf":
A__ = stable_softmax(lowercase , axis=-1 )
A__ = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
A__ = [
{"score": score, "label": candidate_label}
for score, candidate_label in sorted(zip(lowercase , lowercase ) , key=lambda x : -x[0] )
]
return result
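# Hedged usage sketch (not part of the original file); the checkpoint name and the
# scores shown are illustrative assumptions.
#
# from transformers import pipeline
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# preds = classifier("cat.png", candidate_labels=["cat", "dog"], hypothesis_template="This is a photo of {}.")
# # -> [{"score": 0.98, "label": "cat"}, {"score": 0.02, "label": "dog"}]  (sorted by descending score)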
| 626 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCAmelCase__ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCAmelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCAmelCase__ = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
lowerCAmelCase__ = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ = None
# source code of `config_class`
A__ = inspect.getsource(SCREAMING_SNAKE_CASE_ )
A__ = _re_checkpoint.findall(SCREAMING_SNAKE_CASE_ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/" ):
A__ = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
A__ = F'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
A__ = ckpt_name
break
return checkpoint
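# Worked example for the regex above: on a docstring containing
# "[bert-base-uncased](https://huggingface.co/bert-base-uncased)",
# _re_checkpoint.findall(...) yields
# [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")];
# the link rebuilt from the name matches, so "bert-base-uncased" is returned.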
def lowerCAmelCase__ ( ) -> List[str]:
'''simple docstring'''
A__ = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
A__ = get_checkpoint_from_config_class(SCREAMING_SNAKE_CASE_ )
A__ = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
A__ = "\n".join(sorted(SCREAMING_SNAKE_CASE_ ) )
raise ValueError(F'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 626 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class a__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = tempfile.mkdtemp()
A__ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
A__ = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.4814_5466, 0.457_8275, 0.4082_1073],
"image_std": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
A__ = os.path.join(self.tmpdirname , lowercase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowercase , lowercase )
def UpperCamelCase ( self , **lowercase ) -> Dict:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def UpperCamelCase ( self , **lowercase ) -> Union[str, Any]:
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowercase )
def UpperCamelCase ( self , **lowercase ) -> Any:
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **lowercase )
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
A__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A__ = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
A__ = self.get_image_processor()
A__ = AlignProcessor(tokenizer=lowercase , image_processor=lowercase )
processor_slow.save_pretrained(self.tmpdirname )
A__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase )
A__ = AlignProcessor(tokenizer=lowercase , image_processor=lowercase )
processor_fast.save_pretrained(self.tmpdirname )
A__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase )
self.assertIsInstance(processor_fast.tokenizer , lowercase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase )
self.assertIsInstance(processor_fast.image_processor , lowercase )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
A__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
A__ = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
A__ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = AlignProcessor(tokenizer=lowercase , image_processor=lowercase )
A__ = self.prepare_image_inputs()
A__ = image_processor(lowercase , return_tensors="np" )
A__ = processor(images=lowercase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = AlignProcessor(tokenizer=lowercase , image_processor=lowercase )
A__ = "lower newer"
A__ = processor(text=lowercase )
A__ = tokenizer(lowercase , padding="max_length" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = AlignProcessor(tokenizer=lowercase , image_processor=lowercase )
A__ = "lower newer"
A__ = self.prepare_image_inputs()
A__ = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = AlignProcessor(tokenizer=lowercase , image_processor=lowercase )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.batch_decode(lowercase )
A__ = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = AlignProcessor(tokenizer=lowercase , image_processor=lowercase )
A__ = "lower newer"
A__ = self.prepare_image_inputs()
A__ = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 626 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class a__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=0.9 , lowercase=None , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , ) -> str:
'''simple docstring'''
A__ = size if size is not None else {"shortest_edge": 30}
A__ = crop_size if crop_size is not None else {"height": 30, "width": 30}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize_and_center_crop
A__ = size
A__ = crop_pct
A__ = crop_size
A__ = do_normalize
A__ = image_mean
A__ = image_std
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class a__ ( snake_case , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = PoolFormerImageProcessor if is_vision_available() else None
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
A__ = PoolFormerImageProcessingTester(self )
@property
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , "do_resize_and_center_crop" ) )
self.assertTrue(hasattr(lowercase , "size" ) )
self.assertTrue(hasattr(lowercase , "crop_pct" ) )
self.assertTrue(hasattr(lowercase , "do_normalize" ) )
self.assertTrue(hasattr(lowercase , "image_mean" ) )
self.assertTrue(hasattr(lowercase , "image_std" ) )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 30} )
self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30} )
A__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 626 | 1 |
import inspect
import unittest
class a__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
A__ = inspect.getmembers(diffusers , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
A__ = "k-diffusion"
elif backend == "invisible_watermark":
A__ = "invisible-watermark"
assert backend in deps, F'{backend} is not in the deps table!'
| 626 |
import datasets
from .evaluate import evaluate
lowerCAmelCase__ = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100,000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
lowerCAmelCase__ = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
lowerCAmelCase__ = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly matches the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
"""simple docstring"""
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
def UpperCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
A__ = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
A__ = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
A__ = evaluate(dataset=lowercase , predictions=lowercase )
return score
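# Illustrative shapes, following the docstring example above:
#   predictions -> pred_dict: {"56e10a3be3433e1400422b22": "1976"}
#   references  -> dataset:   [{"paragraphs": [{"qas": [{"answers": [{"text": "1976"}],
#                                                        "id": "56e10a3be3433e1400422b22"}]}]}]
# evaluate() then returns a dict like {"exact_match": 100.0, "f1": 100.0}.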
| 626 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple=None ) -> List[Any]:
'''simple docstring'''
if subparsers is not None:
A__ = subparsers.add_parser("test" )
else:
A__ = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=SCREAMING_SNAKE_CASE_ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
return parser
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple ) -> Dict:
'''simple docstring'''
A__ = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
A__ = script_name
else:
A__ = F'--config_file={args.config_file} {script_name}'
A__ = ["accelerate-launch"] + test_args.split()
A__ = execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def lowerCAmelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
A__ = test_command_parser()
A__ = parser.parse_args()
test_command(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 626 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_2d import DualTransformer2DModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .t5_film_transformer import T5FilmDecoder
from .transformer_2d import Transformer2DModel
from .unet_1d import UNet1DModel
from .unet_2d import UNet2DModel
from .unet_2d_condition import UNet2DConditionModel
from .unet_3d_condition import UNet3DConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_2d_condition_flax import FlaxUNet2DConditionModel
from .vae_flax import FlaxAutoencoderKL
| 626 | 1 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[List, PIL.Image.Image, torch.Tensor] ) -> Tuple:
'''simple docstring'''
warnings.warn(
"The preprocess method is deprecated and will be removed in a future version. Please"
" use VaeImageProcessor.preprocess instead" , SCREAMING_SNAKE_CASE_ , )
if isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ):
return image
elif isinstance(SCREAMING_SNAKE_CASE_ , PIL.Image.Image ):
A__ = [image]
if isinstance(image[0] , PIL.Image.Image ):
A__ , A__ = image[0].size
A__ , A__ = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
A__ = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
A__ = np.concatenate(SCREAMING_SNAKE_CASE_ , axis=0 )
A__ = np.array(SCREAMING_SNAKE_CASE_ ).astype(np.floataa ) / 255.0
A__ = image.transpose(0 , 3 , 1 , 2 )
A__ = 2.0 * image - 1.0
A__ = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
elif isinstance(image[0] , torch.Tensor ):
A__ = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 )
return image
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[List, PIL.Image.Image, torch.Tensor] ) -> Any:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ):
return mask
elif isinstance(SCREAMING_SNAKE_CASE_ , PIL.Image.Image ):
A__ = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
A__ , A__ = mask[0].size
A__ , A__ = (x - x % 3_2 for x in (w, h)) # resize to integer multiple of 32
A__ = [np.array(m.convert("L" ).resize((w, h) , resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask]
A__ = np.concatenate(SCREAMING_SNAKE_CASE_ , axis=0 )
A__ = mask.astype(np.floataa ) / 255.0
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
A__ = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
elif isinstance(mask[0] , torch.Tensor ):
A__ = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 )
return mask
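# Worked example (illustrative): a 511x511 grayscale PIL mask is resized to
# 480x480 (the nearest lower multiple of 32), scaled to [0, 1], then binarized
# at 0.5 so every pixel is exactly 0 or 1 before conversion to a tensor.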
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = 42
def __init__( self , lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowercase , scheduler=lowercase )
@torch.no_grad()
def __call__( self , lowercase , lowercase , lowercase = 250 , lowercase = 0.0 , lowercase = 10 , lowercase = 10 , lowercase = None , lowercase = "pil" , lowercase = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
A__ = image
A__ = _preprocess_image(lowercase )
A__ = original_image.to(device=self.device , dtype=self.unet.dtype )
A__ = _preprocess_mask(lowercase )
A__ = mask_image.to(device=self.device , dtype=self.unet.dtype )
A__ = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(lowercase , lowercase ) and len(lowercase ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(lowercase )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
A__ = original_image.shape
A__ = randn_tensor(lowercase , generator=lowercase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(lowercase , lowercase , lowercase , self.device )
A__ = eta
A__ = self.scheduler.timesteps[0] + 1
A__ = generator[0] if isinstance(lowercase , lowercase ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
A__ = self.unet(lowercase , lowercase ).sample
# compute previous image: x_t -> x_t-1
A__ = self.scheduler.step(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
A__ = self.scheduler.undo_step(lowercase , lowercase , lowercase )
A__ = t
A__ = (image / 2 + 0.5).clamp(0 , 1 )
A__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A__ = self.numpy_to_pil(lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase )
| 626 |
from math import factorial
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int = 1_0_0 ) -> int:
'''simple docstring'''
return sum(map(int , str(factorial(SCREAMING_SNAKE_CASE_ ) ) ) )
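# Worked example: factorial(10) == 3628800, so the digit sum is 3+6+2+8+8+0+0 == 27;
# for the default n=100 the result is 648 (Project Euler problem 20).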
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 626 | 1 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] ) -> Dict:
'''simple docstring'''
return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Optional[int] ) -> int:
'''simple docstring'''
A__ = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
A__ = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" )
A__ = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" )
A__ = key.replace("heads.cmd.itm_head.cls" , "itm_head" )
A__ = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" )
A__ = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" )
A__ = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" )
A__ = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" )
A__ = key.replace("mm_text_projection" , "flava.text_to_mm_projection" )
A__ = key.replace("mm_image_projection" , "flava.image_to_mm_projection" )
A__ = key.replace("image_encoder.module" , "flava.image_model" )
A__ = key.replace("text_encoder.module" , "flava.text_model" )
A__ = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" )
A__ = key.replace("mm_encoder.module" , "flava.multimodal_model" )
A__ = key.replace("text_projection" , "flava.text_projection" )
A__ = key.replace("image_projection" , "flava.image_projection" )
A__ = value.float()
for key, value in codebook_state_dict.items():
A__ = value
return upgrade
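# Illustrative key mappings produced by the replacement chain above:
#   "heads.cmd.mlm_head.cls.predictions.bias" -> "mmm_text_head.bias"
#   "text_projection.weight"                  -> "flava.text_projection.weight"
# Values are cast to float32 via value.float().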
@torch.no_grad()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: int=None ) -> Any:
'''simple docstring'''
if config_path is not None:
A__ = FlavaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
else:
A__ = FlavaConfig()
A__ = FlavaForPreTraining(SCREAMING_SNAKE_CASE_ ).eval()
A__ = convert_dalle_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , save_checkpoint=SCREAMING_SNAKE_CASE_ )
if os.path.exists(SCREAMING_SNAKE_CASE_ ):
A__ = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )
else:
A__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="cpu" )
A__ = upgrade_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
hf_model.load_state_dict(SCREAMING_SNAKE_CASE_ )
A__ = hf_model.state_dict()
A__ = count_parameters(SCREAMING_SNAKE_CASE_ )
A__ = count_parameters(SCREAMING_SNAKE_CASE_ ) + count_parameters(SCREAMING_SNAKE_CASE_ )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCAmelCase__ = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 626 |
lowerCAmelCase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: bytes ) -> bytes:
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ , bytes ):
A__ = F'a bytes-like object is required, not \'{data.__class__.__name__}\''
raise TypeError(SCREAMING_SNAKE_CASE_ )
A__ = "".join(bin(SCREAMING_SNAKE_CASE_ )[2:].zfill(8 ) for byte in data )
A__ = len(SCREAMING_SNAKE_CASE_ ) % 6 != 0
if padding_needed:
# The padding that will be added later
A__ = b"=" * ((6 - len(SCREAMING_SNAKE_CASE_ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(SCREAMING_SNAKE_CASE_ ) % 6)
else:
A__ = b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(binary_stream ) , 6 ) ).encode()
+ padding
)
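# Worked example for the encoder above: for b"M" (0x4D), binary_stream is
# "01001101" (8 bits), padding becomes b"==", the stream is zero-padded to
# "010011" + "010000" (indices 19 and 16), and the result is b"TQ==",
# matching the standard library's base64.b64encode(b"M").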
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> bytes:
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ , bytes ) and not isinstance(SCREAMING_SNAKE_CASE_ , str ):
A__ = (
"argument should be a bytes-like object or ASCII string, "
F'not \'{encoded_data.__class__.__name__}\''
)
raise TypeError(SCREAMING_SNAKE_CASE_ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(SCREAMING_SNAKE_CASE_ , bytes ):
try:
A__ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
A__ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(SCREAMING_SNAKE_CASE_ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
A__ = encoded_data[:-padding]
A__ = "".join(
bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE_ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
A__ = "".join(
bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE_ ) )[2:].zfill(6 ) for char in encoded_data )
A__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(binary_stream ) , 8 )
]
return bytes(SCREAMING_SNAKE_CASE_ )
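# Worked example for the decoder above: "TQ==" has padding == 2; "TQ" maps to
# "010011" + "010000", the trailing padding * 2 == 4 bits are dropped, leaving
# "01001101" -> [77] -> b"M".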
if __name__ == "__main__":
import doctest
doctest.testmod()
| 626 | 1 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: int ) -> Union[str, Any]:
'''simple docstring'''
A__ = old_name
if "patch_embed" in old_name:
A__ , A__ , A__ = old_name.split("." )
if layer == "0":
A__ = old_name.replace("0" , "convolution1" )
elif layer == "1":
A__ = old_name.replace("1" , "batchnorm_before" )
elif layer == "3":
A__ = old_name.replace("3" , "convolution2" )
else:
A__ = old_name.replace("4" , "batchnorm_after" )
if "network" in old_name and re.search(R"\d\.\d" , SCREAMING_SNAKE_CASE_ ):
A__ = R"\b\d{2}\b"
if bool(re.search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ):
A__ = re.search(R"\d\.\d\d." , SCREAMING_SNAKE_CASE_ ).group()
else:
A__ = re.search(R"\d\.\d." , SCREAMING_SNAKE_CASE_ ).group()
if int(match[0] ) < 6:
A__ = old_name.replace(SCREAMING_SNAKE_CASE_ , "" )
A__ = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
A__ = "intermediate_stages." + trimmed_name
else:
A__ = old_name.replace(SCREAMING_SNAKE_CASE_ , "" )
if int(match[2] ) < num_meta4D_last_stage:
A__ = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
else:
A__ = str(int(match[2] ) - num_meta4D_last_stage )
A__ = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
A__ = trimmed_name.replace("norm1" , "layernorm1" )
elif "norm2" in old_name:
A__ = trimmed_name.replace("norm2" , "layernorm2" )
elif "fc1" in old_name:
A__ = trimmed_name.replace("fc1" , "linear_in" )
elif "fc2" in old_name:
A__ = trimmed_name.replace("fc2" , "linear_out" )
A__ = "last_stage." + trimmed_name
elif "network" in old_name and re.search(R".\d." , SCREAMING_SNAKE_CASE_ ):
A__ = old_name.replace("network" , "intermediate_stages" )
if "fc" in new_name:
A__ = new_name.replace("fc" , "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
A__ = new_name.replace("norm1" , "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
A__ = new_name.replace("norm2" , "batchnorm_after" )
if "proj" in new_name:
A__ = new_name.replace("proj" , "projection" )
if "dist_head" in new_name:
A__ = new_name.replace("dist_head" , "distillation_classifier" )
elif "head" in new_name:
A__ = new_name.replace("head" , "classifier" )
elif "patch_embed" in new_name:
A__ = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
A__ = new_name.replace("norm" , "layernorm" )
A__ = "efficientformer." + new_name
else:
A__ = "efficientformer.encoder." + new_name
return new_name
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict , SCREAMING_SNAKE_CASE_: List[Any] ) -> int:
'''simple docstring'''
for key in checkpoint.copy().keys():
A__ = checkpoint.pop(SCREAMING_SNAKE_CASE_ )
A__ = val
return checkpoint
def lowerCAmelCase__ ( ) -> Any:
'''simple docstring'''
A__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A__ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return image
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Path , SCREAMING_SNAKE_CASE_: Path , SCREAMING_SNAKE_CASE_: Path , SCREAMING_SNAKE_CASE_: bool ) -> Any:
'''simple docstring'''
A__ = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )["model"]
A__ = EfficientFormerConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
A__ = EfficientFormerForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE_ )
A__ = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
A__ = config.depths[-1] - config.num_metaad_blocks + 1
A__ = convert_torch_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
model.eval()
A__ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
# prepare image
A__ = prepare_img()
A__ = 2_5_6
A__ = 2_2_4
A__ = EfficientFormerImageProcessor(
size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
A__ = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values
# original processing pipeline
A__ = Compose(
[
Resize(SCREAMING_SNAKE_CASE_ , interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(SCREAMING_SNAKE_CASE_ ),
ToTensor(),
Normalize(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ),
] )
A__ = image_transforms(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A__ = model(SCREAMING_SNAKE_CASE_ )
A__ = outputs.logits
A__ = (1, 1_0_0_0)
if "l1" in model_name:
A__ = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :1_0] , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
A__ = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :1_0] , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
A__ = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
F'Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7' )
# Save Checkpoints
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F'Checkpoint successfully converted. Model saved at {pytorch_dump_path}' )
processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F'Processor successfully saved at {pytorch_dump_path}' )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=F'Bearnardd/{pytorch_dump_path}' , commit_message="Add model" , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
processor.push_to_hub(
repo_id=F'Bearnardd/{pytorch_dump_path}' , commit_message="Add image processor" , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
lowerCAmelCase__ = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 626 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
__lowerCamelCase = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = field(default=snake_case , metadata={'help': 'The input training data file (a text file).'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'The maximum total input sequence length after tokenization. If passed, sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'Whether to pad all samples to the maximum sentence length. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
'efficient on GPU but very bad for TPU.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if self.train_file is not None:
A__ = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
A__ = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = True
__lowerCamelCase = None
__lowerCamelCase = None
def __call__( self , lowercase ) -> Tuple:
'''simple docstring'''
A__ = "label" if "label" in features[0].keys() else "labels"
A__ = [feature.pop(lowercase ) for feature in features]
A__ = len(lowercase )
A__ = len(features[0]["input_ids"] )
A__ = [
[{k: v[i] for k, v in feature.items()} for i in range(lowercase )] for feature in features
]
A__ = list(chain(*lowercase ) )
A__ = self.tokenizer.pad(
lowercase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
A__ = {k: v.view(lowercase , lowercase , -1 ) for k, v in batch.items()}
# Add back labels
A__ = torch.tensor(lowercase , dtype=torch.intaa )
return batch
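# Illustrative shapes: with batch_size == 2 and num_choices == 4, `flattened_features`
# holds 8 feature dicts; after padding, each (8, seq_len) tensor is viewed back as
# (2, 4, seq_len), and `labels` is a (2,)-shaped int64 tensor.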
def lowerCAmelCase__ ( ) -> List[Any]:
'''simple docstring'''
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__ , A__ , A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__ , A__ , A__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A__ = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE_ )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
A__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
A__ = {}
if data_args.train_file is not None:
A__ = data_args.train_file
if data_args.validation_file is not None:
A__ = data_args.validation_file
A__ = data_args.train_file.split("." )[-1]
A__ = load_dataset(
SCREAMING_SNAKE_CASE_ , data_files=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
A__ = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
A__ = [F'ending{i}' for i in range(4 )]
A__ = "sent1"
A__ = "sent2"
if data_args.max_seq_length is None:
A__ = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
A__ = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
A__ = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(SCREAMING_SNAKE_CASE_: Optional[Any] ):
A__ = [[context] * 4 for context in examples[context_name]]
A__ = examples[question_header_name]
A__ = [
[F'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(SCREAMING_SNAKE_CASE_ )
]
# Flatten out
A__ = list(chain(*SCREAMING_SNAKE_CASE_ ) )
A__ = list(chain(*SCREAMING_SNAKE_CASE_ ) )
# Tokenize
A__ = tokenizer(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
A__ = raw_datasets["train"]
if data_args.max_train_samples is not None:
A__ = min(len(SCREAMING_SNAKE_CASE_ ) , data_args.max_train_samples )
A__ = train_dataset.select(range(SCREAMING_SNAKE_CASE_ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
A__ = train_dataset.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
A__ = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
A__ = min(len(SCREAMING_SNAKE_CASE_ ) , data_args.max_eval_samples )
A__ = eval_dataset.select(range(SCREAMING_SNAKE_CASE_ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
A__ = eval_dataset.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
A__ = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(SCREAMING_SNAKE_CASE_: str ):
A__ , A__ = eval_predictions
A__ = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
A__ = Trainer(
model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , compute_metrics=SCREAMING_SNAKE_CASE_ , )
# Training
if training_args.do_train:
A__ = None
if training_args.resume_from_checkpoint is not None:
A__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A__ = last_checkpoint
A__ = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ )
trainer.save_model() # Saves the tokenizer too for easy upload
A__ = train_result.metrics
A__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE_ )
)
A__ = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
trainer.log_metrics("train" , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics("train" , SCREAMING_SNAKE_CASE_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
A__ = trainer.evaluate()
A__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE_ )
A__ = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
trainer.log_metrics("eval" , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics("eval" , SCREAMING_SNAKE_CASE_ )
A__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE_ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE_ )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 626 | 1 |
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
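
# A quick usage sketch (the runner names and token here are hypothetical):
#   get_runner_status(["single-gpu-ci-runner", "multi-gpu-ci-runner"], "ghp_XXX")
# raises ValueError listing any of the named runners whose status is "offline".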
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 626 |
def least_divisible_repunit(divisor: int) -> int:
    """
    Return the least k such that the repunit R(k) = (10**k - 1) // 9 is
    divisible by `divisor`, or 0 when no such k exists (i.e. when `divisor`
    shares a factor with 10).
    """
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
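
# Sanity checks (Project Euler 129 notation, where A(n) is the least k such
# that n divides the repunit R(k)): R(6) = 111111 = 3 * 7 * 11 * 13 * 37, so A(7) = 6.
assert least_divisible_repunit(7) == 6
assert least_divisible_repunit(41) == 5  # R(5) = 11111 = 41 * 271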
def solution(limit: int = 1_000_000) -> int:
    """
    Return the least odd divisor coprime to 10 for which
    least_divisible_repunit(divisor) exceeds `limit` (Project Euler 129).
    """
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
    print(f"{solution() = }")
| 626 | 1 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key and reshape
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value and reshape
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance and reshape
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
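
# A minimal illustration of the renaming scheme above (hypothetical key, no
# tensors involved): a raw RWKV key such as "blocks.3.att.time_mix_k" comes out
# of convert_state_dict as "rwkv.blocks.3.attention.time_mix_key".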
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
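
# Example invocation (script, repo and file names here are illustrative only):
#   python convert_rwkv_checkpoint_to_hf.py --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth --output_dir ./rwkv-4-169m-hf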
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 626 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break
    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node
    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
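
# A tiny usage sketch (not part of the original test below): after two tail
# inserts the tail wraps back to the head, so iteration stops after one cycle.
#   cll = CircularLinkedList(); cll.insert_tail(1); cll.insert_tail(2)
#   assert str(cll) == "1->2" and cll.tail.next is cll.head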
def test_circular_linked_list() -> None:
    """
    Test cases for the CircularLinkedList
    >>> test_circular_linked_list()
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 626 | 1 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """
    Extra seq2seq-specific training arguments on top of `TrainingArguments`.
    """

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
| 626 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
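
# By linearity of expectation, the expected number of distinct colours among n
# drawn balls is NUM_COLOURS * P(a fixed colour appears at least once)
#   = NUM_COLOURS * (1 - C(NUM_BALLS - BALLS_PER_COLOUR, n) / C(NUM_BALLS, n)),
# which is exactly what solution() below evaluates for n = 20.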
def solution(num_picked: int = 20) -> str:
    """
    Expected number of distinct colours when `num_picked` balls are drawn
    (Project Euler 493), formatted to nine decimal places.
    """
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
if __name__ == "__main__":
    print(solution(20))
| 626 | 1 |
def solution() -> int:
    """
    Return the product d1 * d10 * d100 * d1000 * d10000 * d100000 * d1000000
    of digits of the Champernowne constant 0.123456789101112... (Project Euler 40).
    """
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
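    # constant[k - 1] is now the k-th digit of 0.123456789101112...; for
    # example constant[11] == "1", the 12th digit of the Champernowne constant.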
return (
int(constant[0] )
* int(constant[9] )
* int(constant[9_9] )
* int(constant[9_9_9] )
* int(constant[9_9_9_9] )
* int(constant[9_9_9_9_9] )
* int(constant[9_9_9_9_9_9] )
)
if __name__ == "__main__":
print(solution())
| 626 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict (e.g. pytorch_model.bin) to torch.float16 for faster downloads and less disk space."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
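
# Example CLI usage via fire (file names are hypothetical): convert in place with
#   python convert_model_to_fp16.py pytorch_model.bin
# or keep the original and write an fp16 copy with
#   python convert_model_to_fp16.py pytorch_model.bin --save_path model.fp16.bin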
if __name__ == "__main__":
fire.Fire(convert)
| 626 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
__lowerCamelCase = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = field(default=snake_case , metadata={'help': 'The input training data file (a text file).'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'The maximum total input sequence length after tokenization. If passed, sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'Whether to pad all samples to the maximum sentence length. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
'efficient on GPU but very bad for TPU.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if self.train_file is not None:
A__ = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
A__ = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = True
__lowerCamelCase = None
__lowerCamelCase = None
def __call__( self , lowercase ) -> Tuple:
'''simple docstring'''
A__ = "label" if "label" in features[0].keys() else "labels"
A__ = [feature.pop(lowercase ) for feature in features]
A__ = len(lowercase )
A__ = len(features[0]["input_ids"] )
A__ = [
[{k: v[i] for k, v in feature.items()} for i in range(lowercase )] for feature in features
]
A__ = list(chain(*lowercase ) )
A__ = self.tokenizer.pad(
lowercase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
A__ = {k: v.view(lowercase , lowercase , -1 ) for k, v in batch.items()}
# Add back labels
A__ = torch.tensor(lowercase , dtype=torch.intaa )
return batch
def main():
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__ , A__ , A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__ , A__ , A__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A__ = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE_ )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
A__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
A__ = {}
if data_args.train_file is not None:
A__ = data_args.train_file
if data_args.validation_file is not None:
A__ = data_args.validation_file
A__ = data_args.train_file.split("." )[-1]
A__ = load_dataset(
SCREAMING_SNAKE_CASE_ , data_files=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
A__ = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
A__ = [F'ending{i}' for i in range(4 )]
A__ = "sent1"
A__ = "sent2"
if data_args.max_seq_length is None:
A__ = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
A__ = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
A__ = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(SCREAMING_SNAKE_CASE_: Optional[Any] ):
A__ = [[context] * 4 for context in examples[context_name]]
A__ = examples[question_header_name]
A__ = [
[F'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(SCREAMING_SNAKE_CASE_ )
]
# Flatten out
A__ = list(chain(*SCREAMING_SNAKE_CASE_ ) )
A__ = list(chain(*SCREAMING_SNAKE_CASE_ ) )
# Tokenize
A__ = tokenizer(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
A__ = raw_datasets["train"]
if data_args.max_train_samples is not None:
A__ = min(len(SCREAMING_SNAKE_CASE_ ) , data_args.max_train_samples )
A__ = train_dataset.select(range(SCREAMING_SNAKE_CASE_ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
A__ = train_dataset.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
A__ = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
A__ = min(len(SCREAMING_SNAKE_CASE_ ) , data_args.max_eval_samples )
A__ = eval_dataset.select(range(SCREAMING_SNAKE_CASE_ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
A__ = eval_dataset.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
A__ = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(SCREAMING_SNAKE_CASE_: str ):
A__ , A__ = eval_predictions
A__ = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
A__ = Trainer(
model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , compute_metrics=SCREAMING_SNAKE_CASE_ , )
# Training
if training_args.do_train:
A__ = None
if training_args.resume_from_checkpoint is not None:
A__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A__ = last_checkpoint
A__ = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ )
trainer.save_model() # Saves the tokenizer too for easy upload
A__ = train_result.metrics
A__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE_ )
)
A__ = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
trainer.log_metrics("train" , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics("train" , SCREAMING_SNAKE_CASE_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
A__ = trainer.evaluate()
A__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE_ )
A__ = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
trainer.log_metrics("eval" , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics("eval" , SCREAMING_SNAKE_CASE_ )
A__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE_ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE_ )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 626 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
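
# Example invocation (script and checkpoint names are just illustrative):
#   python create_model.py --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2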
if __name__ == "__main__":
main()
| 626 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
COMPRESSION_FILESYSTEMS = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Preprocesses `dataset_path` and removes the remote filesystem prefix (e.g. `s3://`)."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Checks if `fs` is a remote filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    """Renames the file `src` in `fs` to `dst`."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """
    Clear the reference to fsspec's event loop and thread; otherwise HTTPFileSystem can hang in training loops.
    """
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
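
# Quick illustration of the path helper above (URIs are hypothetical):
#   extract_path_from_uri("s3://my-bucket/datasets/train") -> "my-bucket/datasets/train"
#   extract_path_from_uri("/local/path/datasets/train")    -> "/local/path/datasets/train"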
| 626 |
import collections
import importlib.util
import os
import re
from pathlib import Path
lowerCAmelCase__ = """src/transformers"""
# Matches is_xxx_available()
lowerCAmelCase__ = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase__ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase__ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCAmelCase__ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase__ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase__ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase__ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase__ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase__ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCAmelCase__ = re.compile(R"""^\s*try:""")
# Catches a line with else:
lowerCAmelCase__ = re.compile(R"""^\s*else:""")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f:
A__ = f.readlines()
A__ = 0
while line_index < len(SCREAMING_SNAKE_CASE_ ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(SCREAMING_SNAKE_CASE_ ):
return None
# First grab the objects without a specific backend in _import_structure
A__ = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
A__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ):
A__ = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ).groups()[0]
A__ = re.findall("\[([^\]]+)\]" , SCREAMING_SNAKE_CASE_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
A__ = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE_ )
if single_line_import_search is not None:
A__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(SCREAMING_SNAKE_CASE_ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE_ )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
A__ = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
A__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
A__ = lines[line_index]
if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ) is not None:
objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ).groups()[0] )
elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ) is not None:
A__ = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(", " )
A__ = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE_ )
elif _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ) is not None:
A__ = _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(", " )
A__ = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE_ )
elif _re_quote_object.search(SCREAMING_SNAKE_CASE_ ) is not None:
objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE_ ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 1_2 + "\"" ):
objects.append(line[1_3:-3] )
line_index += 1
A__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
A__ = []
while (
line_index < len(SCREAMING_SNAKE_CASE_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
A__ = lines[line_index]
A__ = _re_import.search(SCREAMING_SNAKE_CASE_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
A__ = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(SCREAMING_SNAKE_CASE_ ):
# If the line is an if is_backend_available, we grab all objects associated.
A__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
A__ = lines[line_index]
A__ = _re_import.search(SCREAMING_SNAKE_CASE_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 1_2 ):
objects.append(line[1_2:-2] )
line_index += 1
A__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict , SCREAMING_SNAKE_CASE_: List[Any] ) -> Optional[int]:
'''simple docstring'''
def find_duplicates(SCREAMING_SNAKE_CASE_: str ):
return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
A__ = []
for key in import_dict_objects.keys():
A__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
A__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
A__ = "base imports" if key == "none" else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def lowerCAmelCase__ ( ) -> Dict:
'''simple docstring'''
A__ = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE_ ):
if "__init__.py" in files:
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , "__init__.py" )
A__ = parse_init(SCREAMING_SNAKE_CASE_ )
if objects is not None:
A__ = analyze_results(*SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
A__ = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append("\n".join(SCREAMING_SNAKE_CASE_ ) )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
raise ValueError("\n\n".join(SCREAMING_SNAKE_CASE_ ) )
def lowerCAmelCase__ ( ) -> Optional[Any]:
'''simple docstring'''
A__ = []
for path, directories, files in os.walk(SCREAMING_SNAKE_CASE_ ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(SCREAMING_SNAKE_CASE_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(SCREAMING_SNAKE_CASE_ ) / folder).glob("*.py" ) ) ) == 0:
continue
A__ = str((Path(SCREAMING_SNAKE_CASE_ ) / folder).relative_to(SCREAMING_SNAKE_CASE_ ) )
A__ = short_path.replace(os.path.sep , "." )
submodules.append(SCREAMING_SNAKE_CASE_ )
for fname in files:
if fname == "__init__.py":
continue
A__ = str((Path(SCREAMING_SNAKE_CASE_ ) / fname).relative_to(SCREAMING_SNAKE_CASE_ ) )
A__ = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(SCREAMING_SNAKE_CASE_ )
return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def lowerCAmelCase__ ( ) -> Optional[int]:
'''simple docstring'''
A__ = importlib.util.spec_from_file_location(
"transformers" , os.path.join(SCREAMING_SNAKE_CASE_ , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
A__ = spec.loader.load_module()
A__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(SCREAMING_SNAKE_CASE_ ) > 0:
A__ = "\n".join(F'- {module}' for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
F'{list_of_modules}\n'
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 626 | 1 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
super().setUp()
# fmt: off
A__ = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
# fmt: on
A__ = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀
A__ = {"unk_token": "<unk>"}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.emoji_file , "w" ) as emoji_writer:
emoji_writer.write(json.dumps(lowercase ) )
def UpperCamelCase ( self , **lowercase ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def UpperCamelCase ( self , lowercase ) -> Any:
'''simple docstring'''
A__ = "こんにちは、世界。 \nこんばんは、㔺界。😀"
A__ = "こんにちは、世界。 \nこんばんは、世界。😀"
return input_text, output_text
def UpperCamelCase ( self , lowercase ) -> Tuple:
'''simple docstring'''
A__ , A__ = self.get_input_output_texts(lowercase )
A__ = tokenizer.encode(lowercase , add_special_tokens=lowercase )
A__ = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
return text, ids
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
pass # TODO add if relevant
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
A__ = self.get_tokenizer()
# Testing tokenization
A__ = "こんにちは、世界。 こんばんは、㔺界。"
A__ = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
A__ = tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
# Testing conversion to ids without special tokens
A__ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
A__ = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(lowercase , lowercase )
# Testing conversion to ids with special tokens
A__ = tokens + [tokenizer.unk_token]
A__ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
A__ = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(lowercase , lowercase )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
A__ = self.get_tokenizer()
# Testing tokenization
A__ = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
A__ = "こんにちは、、、、世界。こんばんは、、、、世界。"
A__ = tokenizer.encode(lowercase )
A__ = tokenizer.decode(lowercase )
self.assertEqual(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
A__ = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
A__ = "こんにちは、世界。"
A__ = "こんばんは、㔺界。😀"
A__ = "こんにちは、世界。こんばんは、世界。😀"
A__ = tokenizer.encode(prefix_text + input_text )
A__ = tokenizer.encode("" , prefix_text=prefix_text + input_text )
A__ = tokenizer.encode(lowercase , prefix_text=lowercase )
A__ = tokenizer.decode(lowercase )
A__ = tokenizer.decode(lowercase )
A__ = tokenizer.decode(lowercase )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
A__ = "こんにちは、世界。"
A__ = "こんばんは、㔺界。😀"
A__ = len(tokenizer.encode(lowercase ) ) - 2
A__ = len(tokenizer.encode(lowercase ) ) - 2
A__ = [1] + [0] * (len_prefix + len_text + 1)
A__ = [1] * (len_prefix + len_text + 1) + [0]
A__ = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
A__ = tokenizer(prefix_text + input_text ).token_type_ids
A__ = tokenizer("" , prefix_text=prefix_text + input_text ).token_type_ids
A__ = tokenizer(lowercase , prefix_text=lowercase ).token_type_ids
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
A__ = tokenizer.encode("あンいワ" )
A__ = tokenizer.encode("" , prefix_text="あンいワ" )
A__ = tokenizer.encode("いワ" , prefix_text="あン" )
self.assertEqual(tokenizer.decode(lowercase ) , tokenizer.decode(lowercase ) )
self.assertEqual(tokenizer.decode(lowercase ) , tokenizer.decode(lowercase ) )
self.assertNotEqual(lowercase , lowercase )
self.assertNotEqual(lowercase , lowercase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
A__ = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
A__ = tokenizer(lowercase , padding=lowercase )
A__ = tokenizer.batch_encode_plus(lowercase , padding=lowercase )
# fmt: off
A__ = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
A__ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
A__ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , lowercase )
self.assertListEqual(x_token.token_type_ids , lowercase )
self.assertListEqual(x_token.attention_mask , lowercase )
self.assertListEqual(x_token_a.input_ids , lowercase )
self.assertListEqual(x_token_a.token_type_ids , lowercase )
self.assertListEqual(x_token_a.attention_mask , lowercase )
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
pass
| 626 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 626 | 1 |
import datasets
from .evaluate import evaluate
lowerCAmelCase__ = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
lowerCAmelCase__ = """
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
lowerCAmelCase__ = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly matches the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
"""simple docstring"""
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
    def UpperCamelCase ( self , predictions , references ) -> Union[str, Any]:
        '''simple docstring'''
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
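        # the official evaluate() script expects the original SQuAD v1 JSON layout,
        # so the flat references above are re-nested into paragraphs -> qas entries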
        score = evaluate(dataset=dataset , predictions=pred_dict )
return score
| 626 |
from __future__ import annotations
def all_construct ( target: str , word_bank: list[str] | None = None ) -> list[list[str]]:
    '''returns every way `target` can be built by concatenating words from `word_bank`'''
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target ) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size ):
        table.append([] )
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size ):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word )] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word )] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
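# A small worked example of the table logic above (traced by hand):
# all_construct("abc", ["a", "b", "c", "ab"]) returns [["ab", "c"], ["a", "b", "c"]]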
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
| 626 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 626 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples ( tok , src_examples , tgt_examples , max_tokens: int = 1_0_2_4 ) -> Any:
    '''simple docstring'''
    finished_src , finished_tgt = [], []
    sorted_examples = list(zip(src_examples , tgt_examples ) )
    new_src , new_tgt = sorted_examples[0]
    def is_too_big(strang ):
        return tok(strang , return_tensors="pt" ).input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ):  # can't fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src , new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src , new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt
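# Minimal usage sketch for pack_examples (the tokenizer checkpoint is illustrative):
#   tok = AutoTokenizer.from_pretrained("t5-base")
#   packed_src, packed_tgt = pack_examples(tok, ["a b.", "c d."], ["x.", "y."], max_tokens=16)
#   # consecutive example pairs are merged until either side would exceed max_tokens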
def pack_data_dir ( tok , data_dir: Path , max_tokens , save_path ) -> Union[str, Any]:
    '''simple docstring'''
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path , tgt_path = data_dir / F'{split}.source', data_dir / F'{split}.target'
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src , packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens )
        print(F'packed {split} split from {len(src_docs )} examples -> {len(packed_src )}.' )
        Path(save_path / F'{split}.source' ).open("w" ).write("\n".join(packed_src ) )
        Path(save_path / F'{split}.target' ).open("w" ).write("\n".join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path , tgt_path = data_dir / F'{split}.source', data_dir / F'{split}.target'
        shutil.copyfile(src_path , save_path / F'{split}.source' )
        shutil.copyfile(tgt_path , save_path / F'{split}.target' )
def packer_cli ( ) -> Optional[int]:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name" , type=str , help="like facebook/bart-large-cnn,t5-base, etc." )
    parser.add_argument("--max_seq_len" , type=int , default=1_2_8 )
    parser.add_argument("--data_dir" , type=str )
    parser.add_argument("--save_path" , type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer , Path(args.data_dir ) , args.max_seq_len , args.save_path )
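# Example invocation (script and directory names are hypothetical):
#   python pack_dataset.py --tok_name t5-base --max_seq_len 128 \
#       --data_dir ./wmt_en_ro --save_path ./wmt_en_ro_packed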
if __name__ == "__main__":
packer_cli()
| 626 | 1 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCAmelCase__ = get_tests_dir("""fixtures/dummy-config.json""")
class a__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = 0
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(lowercase , lowercase )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
A__ = AutoConfig.for_model("roberta" )
self.assertIsInstance(lowercase , lowercase )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
A__ = os.path.join(lowercase , "fake-roberta" )
os.makedirs(lowercase , exist_ok=lowercase )
with open(os.path.join(lowercase , "config.json" ) , "w" ) as f:
f.write(json.dumps({} ) )
A__ = AutoConfig.from_pretrained(lowercase )
self.assertEqual(type(lowercase ) , lowercase )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
try:
AutoConfig.register("custom" , lowercase )
# Wrong model type will raise an error
with self.assertRaises(lowercase ):
AutoConfig.register("model" , lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoConfig.register("bert" , lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
A__ = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase )
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase , "bert-base is not a local folder and is not a valid model identifier" ):
A__ = AutoConfig.from_pretrained("bert-base" )
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
A__ = AutoConfig.from_pretrained(lowercase , revision="aaaaaa" )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
A__ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
with self.assertRaises(lowercase ):
A__ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase ):
A__ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowercase )
A__ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowercase )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase )
A__ = AutoConfig.from_pretrained(lowercase , trust_remote_code=lowercase )
self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 'new-model'
try:
AutoConfig.register("new-model" , lowercase )
# If remote code is not set, the default is to use local
A__ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
A__ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowercase )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
A__ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowercase )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 626 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory ( args: Namespace ) -> Tuple:
'''simple docstring'''
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
lowerCAmelCase__ = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class a__ ( snake_case ):
"""simple docstring"""
@staticmethod
    def UpperCamelCase ( parser ) -> Optional[int]:
        '''simple docstring'''
        train_parser = parser.add_parser(
            "convert" , help="CLI tool to convert a model from original author checkpoints to Transformers PyTorch checkpoints." , )
        train_parser.add_argument("--model_type" , type=str , required=True , help="Model's type." )
        train_parser.add_argument(
            "--tf_checkpoint" , type=str , required=True , help="TensorFlow checkpoint path or folder." )
        train_parser.add_argument(
            "--pytorch_dump_output" , type=str , required=True , help="Path to the PyTorch saved model output." )
        train_parser.add_argument("--config" , type=str , default="" , help="Configuration file path or folder." )
        train_parser.add_argument(
            "--finetuning_task_name" , type=str , default=None , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , model_type , tf_checkpoint , pytorch_dump_output , config , finetuning_task_name , *args , ) -> Union[str, Any]:
        '''simple docstring'''
        self._logger = logging.get_logger("transformers-cli/converting" )
        self._logger.info(F'Loading model {model_type}' )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
if "ckpt" in self._tf_checkpoint.lower():
A__ = self._tf_checkpoint
A__ = ""
else:
A__ = self._tf_checkpoint
A__ = ""
convert_transfo_xl_checkpoint_to_pytorch(
lowercase , self._config , self._pytorch_dump_output , lowercase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 626 | 1 |
class a__ :
"""simple docstring"""
def __init__( self ) -> Optional[int]:
'''simple docstring'''
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex ( self , vertex ) -> Any:
        '''simple docstring'''
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge ( self , head , tail , weight ) -> Union[str, Any]:
        '''simple docstring'''
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight ( self ) -> Union[str, Any]:
        '''simple docstring'''
        edges = self.get_edges()
        for edge in edges:
            head , tail , weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head , tail , weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__( self ) -> str:
'''simple docstring'''
A__ = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
A__ = self.adjacency[head][tail]
string += F'{head} -> {tail} == {weight}\n'
return string.rstrip("\n" )
    def get_edges ( self ) -> Optional[int]:
        '''simple docstring'''
        output = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
    def get_vertices ( self ) -> Tuple:
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
    def build ( vertices=None , edges=None ) -> List[str]:
        '''simple docstring'''
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
return g
class a__ :
"""simple docstring"""
def __init__( self ) -> List[str]:
'''simple docstring'''
        self.parent = {}
        self.rank = {}
def __len__( self ) -> Optional[Any]:
'''simple docstring'''
return len(self.parent )
    def make_set ( self , item ) -> List[str]:
        '''simple docstring'''
        if item in self.parent:
            return self.find(item )
        self.parent[item] = item
        self.rank[item] = 0
        return item
    def find ( self , item ) -> Union[str, Any]:
        '''simple docstring'''
        if item not in self.parent:
            return self.make_set(item )
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item] )
        return self.parent[item]
    def union ( self , itema , itemb ) -> Optional[int]:
        '''simple docstring'''
        roota = self.find(itema )
        rootb = self.find(itemb )
        if roota == rootb:
            return roota
        if self.rank[roota] > self.rank[rootb]:
            self.parent[rootb] = roota
            return roota
        if self.rank[roota] < self.rank[rootb]:
            self.parent[roota] = rootb
            return rootb
        if self.rank[roota] == self.rank[rootb]:
            self.rank[roota] += 1
            self.parent[rootb] = roota
            return roota
        return None
@staticmethod
    def boruvka_mst ( graph ) -> Optional[Any]:
        '''simple docstring'''
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head , tail , weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head , tail , weight = edge
                seta = union_find.find(head )
                setb = union_find.find(tail )
                if seta != setb:
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        cheap_edge[seta] = [head, tail, weight]
                    if cheap_edge[setb] == -1 or cheap_edge[setb][2] > weight:
                        cheap_edge[setb] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head , tail , weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
return mst
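# Minimal usage sketch (a 3-vertex triangle; Boruvka keeps the two cheapest edges):
#   g = Graph.build(vertices=[1, 2, 3], edges=[(1, 2, 1), (2, 3, 2), (1, 3, 3)])
#   g.distinct_weight()
#   mst = Graph.boruvka_mst(g)
#   print(mst)  # prints each kept edge in both directions via __str__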
| 626 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class a__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , lowercase=True , lowercase=1 / 255 , lowercase=True , ) -> Union[str, Any]:
'''simple docstring'''
A__ = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_normalize
A__ = image_mean
A__ = image_std
A__ = do_rescale
A__ = rescale_factor
A__ = do_pad
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase ( self , lowercase , lowercase=False ) -> int:
'''simple docstring'''
if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
else:
            expected_values = []
for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class a__ ( snake_case , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = DetaImageProcessor if is_vision_available() else None
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
A__ = DetaImageProcessingTester(self )
@property
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , "image_mean" ) )
self.assertTrue(hasattr(lowercase , "image_std" ) )
self.assertTrue(hasattr(lowercase , "do_normalize" ) )
self.assertTrue(hasattr(lowercase , "do_resize" ) )
self.assertTrue(hasattr(lowercase , "do_rescale" ) )
self.assertTrue(hasattr(lowercase , "do_pad" ) )
self.assertTrue(hasattr(lowercase , "size" ) )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , lowercase )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
A__ = json.loads(f.read() )
A__ = {"image_id": 39769, "annotations": target}
# encode them
A__ = DetaImageProcessor()
A__ = image_processing(images=lowercase , annotations=lowercase , return_tensors="pt" )
# verify pixel values
A__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowercase )
A__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowercase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowercase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowercase )
A__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowercase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowercase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowercase ) )
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowercase ) )
# verify orig_size
A__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowercase ) )
# verify size
A__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowercase ) )
@slow
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
A__ = json.loads(f.read() )
A__ = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
A__ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
A__ = DetaImageProcessor(format="coco_panoptic" )
A__ = image_processing(images=lowercase , annotations=lowercase , masks_path=lowercase , return_tensors="pt" )
# verify pixel values
A__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowercase )
A__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowercase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowercase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowercase )
A__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowercase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowercase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowercase ) )
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowercase ) )
# verify masks
A__ = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowercase )
# verify orig_size
A__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowercase ) )
# verify size
A__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowercase ) )
| 626 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a__ ( snake_case , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = DanceDiffusionPipeline
__lowerCamelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__lowerCamelCase = PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
__lowerCamelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__lowerCamelCase = False
__lowerCamelCase = False
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
A__ = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowercase , use_timestep_embedding=lowercase , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , )
A__ = IPNDMScheduler()
A__ = {
"unet": unet,
"scheduler": scheduler,
}
return components
def UpperCamelCase ( self , lowercase , lowercase=0 ) -> Union[str, Any]:
'''simple docstring'''
if str(lowercase ).startswith("mps" ):
A__ = torch.manual_seed(lowercase )
else:
A__ = torch.Generator(device=lowercase ).manual_seed(lowercase )
A__ = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 4,
}
return inputs
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = "cpu" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = DanceDiffusionPipeline(**lowercase )
A__ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A__ = self.get_dummy_inputs(lowercase )
A__ = pipe(**lowercase )
A__ = output.audios
A__ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
A__ = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = torch_device
A__ = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
A__ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A__ = torch.manual_seed(0 )
A__ = pipe(generator=lowercase , num_inference_steps=100 , audio_length_in_s=4.096 )
A__ = output.audios
A__ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
A__ = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
A__ = torch_device
A__ = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa )
A__ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A__ = torch.manual_seed(0 )
A__ = pipe(generator=lowercase , num_inference_steps=100 , audio_length_in_s=4.096 )
A__ = output.audios
A__ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
A__ = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 626 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class a__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.int32 , )  # J'aime le camembert !"
        output = model(input_ids )["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 626 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 626 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a__ ( snake_case , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = DanceDiffusionPipeline
__lowerCamelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__lowerCamelCase = PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
__lowerCamelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__lowerCamelCase = False
__lowerCamelCase = False
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
A__ = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowercase , use_timestep_embedding=lowercase , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , )
A__ = IPNDMScheduler()
A__ = {
"unet": unet,
"scheduler": scheduler,
}
return components
def UpperCamelCase ( self , lowercase , lowercase=0 ) -> Union[str, Any]:
'''simple docstring'''
if str(lowercase ).startswith("mps" ):
A__ = torch.manual_seed(lowercase )
else:
A__ = torch.Generator(device=lowercase ).manual_seed(lowercase )
A__ = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 4,
}
return inputs
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = "cpu" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = DanceDiffusionPipeline(**lowercase )
A__ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A__ = self.get_dummy_inputs(lowercase )
A__ = pipe(**lowercase )
A__ = output.audios
A__ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
A__ = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = torch_device
A__ = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
A__ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A__ = torch.manual_seed(0 )
A__ = pipe(generator=lowercase , num_inference_steps=100 , audio_length_in_s=4.096 )
A__ = output.audios
A__ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
A__ = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
A__ = torch_device
A__ = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa )
A__ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A__ = torch.manual_seed(0 )
A__ = pipe(generator=lowercase , num_inference_steps=100 , audio_length_in_s=4.096 )
A__ = output.audios
A__ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
A__ = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 626 | 1 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared ( vector: ndarray ) -> float:
    '''simple docstring'''
    return np.dot(vector , vector )
class a__ :
"""simple docstring"""
    def __init__( self , *,
        regularization: float = np.inf , kernel: str = "linear" , gamma: float = 0.0 , ) -> None:
        '''simple docstring'''
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma" )
            if not isinstance(self.gamma , (float, int) ):
                raise ValueError("gamma must be float or int" )
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0" )
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = F'Unknown kernel: {kernel}'
            raise ValueError(msg )
    def __linear( self , vectora , vectorb ) -> float:
        '''simple docstring'''
        return np.dot(vectora , vectorb )
    def __rbf( self , vectora , vectorb ) -> float:
        '''simple docstring'''
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb )) )
    def fit( self , observations , classes ) -> None:
        '''simple docstring'''
        self.observations = observations
        self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
        (n , ) = np.shape(classes )
        def to_minimize(candidate ) -> float:
            s = 0
            (n , ) = np.shape(candidate )
            for i in range(n ):
                for j in range(n ):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] , observations[j] )
                    )
            return 1 / 2 * s - sum(candidate )
        ly_constraint = LinearConstraint(classes , 0 , 0 )
        l_bounds = Bounds(0 , self.regularization )
        l_star = minimize(
            to_minimize , np.ones(n ) , bounds=l_bounds , constraints=[ly_constraint] ).x
        self.optimum = l_star
# calculating mean offset of separation plane to points
        s = 0
        for i in range(n ):
            for j in range(n ):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] , observations[j] )
        self.offset = s / n
    def predict( self , observation ) -> int:
        '''simple docstring'''
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation )
            for n in range(len(self.classes ) ) )
        return 1 if s + self.offset >= 0 else -1
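# Minimal usage sketch (toy linearly separable data; labels must be +/-1):
#   xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]), np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
#   ys = [1, 1, -1, -1]
#   svc = a__(regularization=10.0, kernel="linear")
#   svc.fit(observations=xs, classes=ys)
#   svc.predict(np.asarray([0.0, 1.5]))  # expected: 1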
if __name__ == "__main__":
import doctest
doctest.testmod()
| 626 |
from __future__ import annotations
def depth_first_search ( possible_board: list[int] , diagonal_right_collisions: list[int] , diagonal_left_collisions: list[int] , boards: list[list[str]] , n: int , ) -> None:
    '''simple docstring'''
    row = len(possible_board )
    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n ):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) does not already contain the same column value, because if
        # it does there is a vertical collision. Then we apply the two formulas we
        # learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
        # And we verify that the results of these two formulas do not already exist in
        # their respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If either of these is True it means there is a collision, so we continue to the
        # next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # If there is no collision we call the dfs function again with updated inputs
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution ( n: int ) -> None:
    '''simple docstring'''
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
    # Print all the boards
    for board in boards:
        for column in board:
            print(column )
        print("" )
    print(len(boards ) , "solutions were found." )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 626 | 1 |
import os
import time
import numpy as np
import onnxruntime as ort
lowerCAmelCase__ = """1"""
lowerCAmelCase__ = """0"""
lowerCAmelCase__ = """1"""
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("""Create inference session...""")
execution_provider = ["""TensorrtExecutionProvider""", """CUDAExecutionProvider"""]
sess = ort.InferenceSession("""model.onnx""", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()
sequence = 1_2_8
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print("""Warm up phase...""")
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("""Start inference...""")
start_time = time.time()
max_iters = 2_0_0_0
lowerCAmelCase__ = {}
for iter in range(max_iters):
lowerCAmelCase__ = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("""Average Inference Time = {:.3f} ms""".format((time.time() - start_time) * 1_0_0_0 / max_iters))
| 626 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 'new-model'
if is_tf_available():
    class TFNewModel ( snake_case ):
"""simple docstring"""
__lowerCamelCase = NewModelConfig
@require_tf
class a__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
A__ = "bert-base-cased"
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
A__ = "bert-base-cased"
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelForPreTraining.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelForCausalLM.from_pretrained(lowercase )
A__ , A__ = TFAutoModelForCausalLM.from_pretrained(lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelWithLMHead.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelForMaskedLM.from_pretrained(lowercase )
A__ , A__ = TFAutoModelForMaskedLM.from_pretrained(lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase )
A__ , A__ = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelForSequenceClassification.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelForQuestionAnswering.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
@require_tensorflow_probability
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelForTableQuestionAnswering.from_pretrained(lowercase )
A__ , A__ = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = TFAutoModelWithLMHead.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = TFAutoModelWithLMHead.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(lowercase , lowercase )
A__ = copy.deepcopy(model.config )
A__ = ["FunnelBaseModel"]
A__ = TFAutoModel.from_config(lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase )
A__ = TFAutoModel.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
        try:
            AutoConfig.register("new-model" , NewModelConfig )

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__ ):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError ):
                        auto_class.register(BertConfig , NewModel )
                    auto_class.register(NewModelConfig , NewModel )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError ):
                        auto_class.register(BertConfig , TFBertModel )

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self ).get_config()
                    config = NewModelConfig(**tiny_config.to_dict() )
                    model = auto_class.from_config(config )
                    self.assertIsInstance(model , NewModel )

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir )
                        new_model = auto_class.from_pretrained(tmp_dir )
                        self.assertIsInstance(new_model , NewModel )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , "bert-base is not a local folder and is not a valid model identifier" ):
            model = TFAutoModel.from_pretrained("bert-base" )

    def UpperCamelCase ( self ) -> Optional[Any]:
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision="aaaaaa" )

    def UpperCamelCase ( self ) -> Union[str, Any]:
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
            model = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )

    def UpperCamelCase ( self ) -> Union[str, Any]:
        '''simple docstring'''
        with self.assertRaisesRegex(EnvironmentError , "Use `from_pt=True` to load this model" ):
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
A__ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
A__ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
A__ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 626 | 1 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler ):
    """simple docstring"""

    a: int = 0
    b: bool = False
    c: float = 3.0
class a__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {"a": 2, "b": True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} )
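    # ``to_kwargs`` builds a default instance of the dataclass and returns only the fields
    # that differ from it, which is why the untouched ``b``/``c`` defaults in the assertions
    # above are dropped from the resulting dict.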
@require_cuda
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
        scaler_handler = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale , 1024.0 )
        self.assertEqual(scaler._growth_factor , 2.0 )

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor , 0.5 )
        self.assertEqual(scaler._growth_interval , 2000 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
A__ = ["torchrun", F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(lowercase , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(1_0_0, 2_0_0)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 626 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCAmelCase__ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCAmelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCAmelCase__ = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
lowerCAmelCase__ = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class ):
    '''simple docstring'''
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class )
    checkpoints = _re_checkpoint.findall(config_source )

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/" ):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = F'https://huggingface.co/{ckpt_name}'
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints():
    '''simple docstring'''
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class )
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )

    if len(configs_without_checkpoint ) > 0:
        message = "\n".join(sorted(configs_without_checkpoint ) )
        raise ValueError(F'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 626 | 1 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""

    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = 'tokenizer_file'
    special_tokens_map = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
        tokenizer.save_pretrained(self.tmpdirname )

    def UpperCamelCase ( self , **kwargs ) -> BloomTokenizerFast:
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
        tokenizer = self.get_rust_tokenizer()
        texts = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        target_tokens = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(texts )["input_ids"]
        self.assertListEqual(computed_tokens , target_tokens )

        decoded_tokens = tokenizer.batch_decode(computed_tokens )
        self.assertListEqual(decoded_tokens , texts )
    def UpperCamelCase ( self , max_length=6 ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s , max_length=max_length )
                    tokenizer_r.encode_plus(s , max_length=max_length )
                    tokenizer_r.batch_encode_plus(s2 , max_length=max_length )
                    tokenizer_r.encode(p , max_length=max_length )
                    tokenizer_r.batch_encode_plus(p2 , max_length=max_length )
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding" )
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="max_length" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="max_length" , )
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli" , "all_languages" , split="test" , streaming=True )

        sample_data = next(iter(ds ) )["premise"]  # pick up one data
        input_text = list(sample_data.values() )

        output_tokens = list(map(tokenizer.encode , input_text ) )
        predicted_text = [tokenizer.decode(x , clean_up_tokenization_spaces=False ) for x in output_tokens]
        self.assertListEqual(predicted_text , input_text )
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 626 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize_and_center_crop=True , size=None , crop_pct=0.9 , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        '''simple docstring'''
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class a__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""

    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
        self.image_processor_tester = PoolFormerImageProcessingTester(self )
@property
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize_and_center_crop" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "crop_pct" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 30} )
        self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30} )

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 626 | 1 |
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 626 |
import datasets
from .evaluate import evaluate
lowerCAmelCase__ = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
lowerCAmelCase__ = """
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
lowerCAmelCase__ = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric ):
    """simple docstring"""

    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
    def _compute( self , predictions , references ):
        '''simple docstring'''
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
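        # Note: the flat ``references`` list is re-wrapped into the nested
        # ``{"paragraphs": [{"qas": [...]}]}`` layout above because the official SQuAD v1
        # ``evaluate`` script expects the original dataset schema, not a flat list.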
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 626 | 1 |
import collections
import importlib.util
import os
import re
from pathlib import Path
lowerCAmelCase__ = """src/transformers"""
# Matches is_xxx_available()
lowerCAmelCase__ = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase__ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase__ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCAmelCase__ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase__ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase__ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase__ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase__ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase__ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCAmelCase__ = re.compile(R"""^\s*try:""")
# Catches a line with else:
lowerCAmelCase__ = re.compile(R"""^\s*else:""")
def find_backend(line ):
    '''simple docstring'''
    if _re_test_backend.search(line ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line )]
    backends.sort()
    return "_and_".join(backends )
def parse_init(init_file ):
    '''simple docstring'''
    with open(init_file , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith("_import_structure = {" ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall(R"\[([^\]]+)\]" , content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", " )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(" " * 8 + "\"" ):
            objects.append(line[9:-3] )
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING" ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(", " )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(", " )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(" " * 8 + "\"" ):
                    objects.append(line[9:-3] )
                elif line.startswith(" " * 1_2 + "\"" ):
                    objects.append(line[1_3:-3] )
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith("else" )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", " ) )
        elif line.startswith(" " * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", " ) )
                elif line.startswith(" " * 1_2 ):
                    objects.append(line[1_2:-2] )
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects , type_hint_objects ):
    '''simple docstring'''
    def find_duplicates(seq ):
        return [k for k, v in collections.Counter(seq ).items() if v > 1]

    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = "base imports" if key == "none" else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def check_all_inits():
    '''simple docstring'''
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root , "__init__.py" )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
                    failures.append("\n".join(errors ) )
    if len(failures ) > 0:
        raise ValueError("\n\n".join(failures ) )
def get_transformers_submodules():
    '''simple docstring'''
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_" ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob("*.py" ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep , "." )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
            if len(submodule.split("." ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def check_submodules():
    '''simple docstring'''
    spec = importlib.util.spec_from_file_location(
        "transformers" , os.path.join(PATH_TO_TRANSFORMERS , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = "\n".join(F'- {module}' for module in module_not_registered )
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            F'{list_of_modules}\n'
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 626 |
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 626 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config , base_model=False ):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
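# Shape sketch (illustration, not part of the original script): timm stores the fused qkv
# projection as a single (3*hidden_size, hidden_size) matrix, so for hidden_size H the slices
# above are query = rows [0:H], key = rows [H:2H] and value = rows [2H:3H].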
def remove_classification_head_(state_dict ):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name , pytorch_dump_folder_path , push_to_hub=False ):
    '''simple docstring'''
    backbone_config = BitConfig(
        global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=3_8_4 , num_labels=1_0_0_0 )
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )

    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )

    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="pt" ).pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits

    print("Predicted class:" , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1e-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print("Looks ok!" )

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'Saving processor to {pytorch_dump_folder_path}' )
        processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        print(F'Pushing model and processor to the hub {vit_name}' )
        model.push_to_hub(F'ybelkada/{vit_name}' )
        processor.push_to_hub(F'ybelkada/{vit_name}' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
lowerCAmelCase__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 626 |
from math import factorial
def solution(num: int = 100 ) -> int:
    '''simple docstring'''
    return sum(map(int , str(factorial(num ) ) ) )
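# Illustrative check: solution(10) == 27, since 10! = 3628800 and 3+6+2+8+8+0+0 = 27;
# the default call solution(100) returns 648 (the Project Euler problem 20 answer).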
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 626 | 1 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace ):
    '''simple docstring'''
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
lowerCAmelCase__ = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class ConvertCommand(BaseTransformersCLICommand ):
"""simple docstring"""
@staticmethod
    def register_subcommand( parser: ArgumentParser ):
        '''simple docstring'''
        train_parser = parser.add_parser(
"convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
train_parser.add_argument("--model_type" , type=lowercase , required=lowercase , help="Model's type." )
train_parser.add_argument(
"--tf_checkpoint" , type=lowercase , required=lowercase , help="TensorFlow checkpoint path or folder." )
train_parser.add_argument(
"--pytorch_dump_output" , type=lowercase , required=lowercase , help="Path to the PyTorch saved model output." )
train_parser.add_argument("--config" , type=lowercase , default="" , help="Configuration file path or folder." )
train_parser.add_argument(
"--finetuning_task_name" , type=lowercase , default=lowercase , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
        train_parser.set_defaults(func=convert_command_factory )
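        # Example invocation (illustrative; the paths are placeholders):
        #   transformers-cli convert --model_type bert \
        #       --tf_checkpoint ./bert_model.ckpt --config ./bert_config.json \
        #       --pytorch_dump_output ./pytorch_model.bin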
    def __init__( self , model_type: str , tf_checkpoint: str , pytorch_dump_output: str , config: str , finetuning_task_name: str , *args ):
        '''simple docstring'''
        self._logger = logging.get_logger("transformers-cli/converting" )

        self._logger.info(F'Loading model {model_type}' )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run( self ):
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
if "ckpt" in self._tf_checkpoint.lower():
A__ = self._tf_checkpoint
A__ = ""
else:
A__ = self._tf_checkpoint
A__ = ""
convert_transfo_xl_checkpoint_to_pytorch(
lowercase , self._config , self._pytorch_dump_output , lowercase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 626 |
lowerCAmelCase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def base64_encode(data: bytes ) -> bytes:
    '''simple docstring'''
    if not isinstance(data , bytes ):
        msg = F'a bytes-like object is required, not \'{data.__class__.__name__}\''
        raise TypeError(msg )

    binary_stream = "".join(bin(byte )[2:].zfill(8 ) for byte in data )

    padding_needed = len(binary_stream ) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream ) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
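# Worked example (illustration): base64_encode(b"a") turns the byte into "01100001";
# 8 % 6 != 0, so padding b"==" is computed and the stream is zero-extended to
# "011000" + "010000", i.e. charset indices 24 and 16 -> "YQ", giving b"YQ==".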
def base64_decode(encoded_data: str ) -> bytes:
    '''simple docstring'''
    if not isinstance(encoded_data , bytes ) and not isinstance(encoded_data , str ):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            F'not \'{encoded_data.__class__.__name__}\''
        )
        raise TypeError(msg )

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode("utf-8" )
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters" )

    padding = encoded_data.count("=" )

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )

    decoded_data = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]

    return bytes(decoded_data )
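# Round-trip sketch (illustration): base64_decode("YQ==") strips the two "=" signs, maps "Y"
# and "Q" back to the 6-bit groups "011000" and "010000", drops the 2 * padding = 4 filler
# bits and rebuilds the byte 0b01100001, returning b"a" and inverting base64_encode above.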
if __name__ == "__main__":
import doctest
doctest.testmod()
| 626 | 1 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str ) -> None:
    '''simple docstring'''
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(" " + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.

    # print entropy
    print(F'{round(-1 * my_fir_sum ):.1f}' )

    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )

    # print second entropy
    print(F'{round(-1 * my_sec_sum ):.1f}' )

    # print the difference between them
    print(F'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def analyze_text(text: str ) -> tuple[dict, dict]:
    '''simple docstring'''
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
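# Illustrative trace: analyze_text("abb") counts the final character once up front and then
# scans the pairs, yielding single_char_strings == {"b": 2, "a": 1} and
# two_char_strings == {" a": 1, "ab": 1, "bb": 1}.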
def main() -> None:
    '''simple docstring'''
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 626 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
__lowerCamelCase = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = field(default=snake_case , metadata={'help': 'The input training data file (a text file).'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'The maximum total input sequence length after tokenization. If passed, sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'Whether to pad all samples to the maximum sentence length. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
'efficient on GPU but very bad for TPU.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if self.train_file is not None:
A__ = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
A__ = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = True
__lowerCamelCase = None
__lowerCamelCase = None
def __call__( self , lowercase ) -> Tuple:
'''simple docstring'''
A__ = "label" if "label" in features[0].keys() else "labels"
A__ = [feature.pop(lowercase ) for feature in features]
A__ = len(lowercase )
A__ = len(features[0]["input_ids"] )
A__ = [
[{k: v[i] for k, v in feature.items()} for i in range(lowercase )] for feature in features
]
A__ = list(chain(*lowercase ) )
A__ = self.tokenizer.pad(
lowercase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
A__ = {k: v.view(lowercase , lowercase , -1 ) for k, v in batch.items()}
# Add back labels
A__ = torch.tensor(lowercase , dtype=torch.intaa )
return batch
def lowerCAmelCase__ ( ) -> List[Any]:
'''simple docstring'''
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A__ = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE_ )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F' distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
A__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
A__ = {}
if data_args.train_file is not None:
A__ = data_args.train_file
if data_args.validation_file is not None:
A__ = data_args.validation_file
A__ = data_args.train_file.split("." )[-1]
A__ = load_dataset(
SCREAMING_SNAKE_CASE_ , data_files=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
A__ = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
A__ = [F'ending{i}' for i in range(4 )]
A__ = "sent1"
A__ = "sent2"
if data_args.max_seq_length is None:
A__ = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
A__ = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
A__ = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(SCREAMING_SNAKE_CASE_: Optional[Any] ):
A__ = [[context] * 4 for context in examples[context_name]]
A__ = examples[question_header_name]
A__ = [
[F'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(SCREAMING_SNAKE_CASE_ )
]
# Flatten out
A__ = list(chain(*SCREAMING_SNAKE_CASE_ ) )
A__ = list(chain(*SCREAMING_SNAKE_CASE_ ) )
# Tokenize
A__ = tokenizer(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
A__ = raw_datasets["train"]
if data_args.max_train_samples is not None:
A__ = min(len(SCREAMING_SNAKE_CASE_ ) , data_args.max_train_samples )
A__ = train_dataset.select(range(SCREAMING_SNAKE_CASE_ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
A__ = train_dataset.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
A__ = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
A__ = min(len(SCREAMING_SNAKE_CASE_ ) , data_args.max_eval_samples )
A__ = eval_dataset.select(range(SCREAMING_SNAKE_CASE_ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
A__ = eval_dataset.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
A__ = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(SCREAMING_SNAKE_CASE_: str ):
        predictions , label_ids = eval_predictions
        A__ = np.argmax(predictions , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
A__ = Trainer(
model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , compute_metrics=SCREAMING_SNAKE_CASE_ , )
# Training
if training_args.do_train:
A__ = None
if training_args.resume_from_checkpoint is not None:
A__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A__ = last_checkpoint
A__ = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ )
trainer.save_model() # Saves the tokenizer too for easy upload
A__ = train_result.metrics
A__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE_ )
)
A__ = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
trainer.log_metrics("train" , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics("train" , SCREAMING_SNAKE_CASE_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
A__ = trainer.evaluate()
A__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE_ )
A__ = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
trainer.log_metrics("eval" , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics("eval" , SCREAMING_SNAKE_CASE_ )
A__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE_ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] ) -> Dict:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
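# Illustration only (a sketch, not part of the training script): the
# flatten/un-flatten pattern used by preprocess_function and the collator
# above, reduced to plain lists.
def _regroup(flat, num_choices=4):
    return [flat[i : i + num_choices] for i in range(0, len(flat), num_choices)]
assert _regroup(["e0", "e1", "e2", "e3", "f0", "f1", "f2", "f3"]) == [
    ["e0", "e1", "e2", "e3"],
    ["f0", "f1", "f2", "f3"],
]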
| 626 | 1 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase__ = 1_6
lowerCAmelCase__ = 3_2
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Accelerator , SCREAMING_SNAKE_CASE_: int = 1_6 ) -> Any:
'''simple docstring'''
A__ = AutoTokenizer.from_pretrained("bert-base-cased" )
A__ = load_dataset("glue" , "mrpc" )
def tokenize_function(SCREAMING_SNAKE_CASE_: str ):
# max_length=None => use the model max length (it's actually the default)
        A__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ = datasets.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(SCREAMING_SNAKE_CASE_: Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A__ = 1_6
elif accelerator.mixed_precision != "no":
A__ = 8
else:
A__ = None
return tokenizer.pad(
SCREAMING_SNAKE_CASE_ , padding="longest" , max_length=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_tensors="pt" , )
# Instantiate dataloaders.
    A__ = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size , drop_last=True )
    A__ = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE , drop_last=(accelerator.mixed_precision == "fp8") , )
return train_dataloader, eval_dataloader
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> str:
'''simple docstring'''
A__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ = config["lr"]
A__ = int(config["num_epochs"] )
A__ = int(config["seed"] )
A__ = int(config["batch_size"] )
A__ = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
A__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A__ = batch_size // MAX_GPU_BATCH_SIZE
A__ = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=SCREAMING_SNAKE_CASE_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ = model.to(accelerator.device )
# Instantiate optimizer
A__ = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ )
# Instantiate scheduler
A__ = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=1_0_0 , num_training_steps=(len(SCREAMING_SNAKE_CASE_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# Now we train the model
    for epoch in range(num_epochs ):
model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            A__ = model(**batch )
            A__ = outputs.loss
            A__ = loss / gradient_accumulation_steps
            accelerator.backward(loss )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                A__ = model(**batch )
A__ = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather_for_metrics((predictions, batch["labels"]) )
            metric.add_batch(
                predictions=predictions , references=references , )
A__ = metric.compute()
# Use accelerator.print to print only on the main process.
        accelerator.print(F'epoch {epoch}:' , eval_metric )
def lowerCAmelCase__ ( ) -> List[str]:
'''simple docstring'''
A__ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
A__ = parser.parse_args()
A__ = {"lr": 2e-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
    training_function(config , args )
if __name__ == "__main__":
main()
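# Illustration only: the gradient-accumulation arithmetic of the loop above.
# A requested batch of 6_4 with a per-step cap of 1_6 (mirroring
# MAX_GPU_BATCH_SIZE) yields 4 accumulation steps, and the loss is divided by
# that factor before each backward().
_requested, _cap = 6_4, 1_6
_accum = _requested // _cap
assert _accum == 4 and _accum * _cap == _requested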
| 626 |
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int ) -> int:
'''simple docstring'''
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
A__ = 1
A__ = 1
while repunit:
A__ = (1_0 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int = 1_0_0_0_0_0_0 ) -> int:
'''simple docstring'''
A__ = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(SCREAMING_SNAKE_CASE_ ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(f"""{solution() = }""")
| 626 | 1 |
from __future__ import annotations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] , SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: Dict ) -> Union[str, Any]: # noqa: E741
'''simple docstring'''
while r - l > 1:
A__ = (l + r) // 2
if v[m] >= key:
A__ = m
else:
A__ = m # noqa: E741
return r
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: list[int] ) -> int:
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return 0
A__ = [0] * len(SCREAMING_SNAKE_CASE_ )
A__ = 1
A__ = v[0]
for i in range(1 , len(SCREAMING_SNAKE_CASE_ ) ):
if v[i] < tail[0]:
A__ = v[i]
elif v[i] > tail[length - 1]:
A__ = v[i]
length += 1
else:
A__ = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
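# Illustration only (a separate sketch, not the helper above): the same
# O(n log n) patience idea via the standard library; bisect_left keeps the
# tracked subsequence strictly increasing.
import bisect
def _lis_len(seq: list) -> int:
    tails: list = []
    for x in seq:
        pos = bisect.bisect_left(tails, x)
        if pos == len(tails):
            tails.append(x)
        else:
            tails[pos] = x
    return len(tails)
assert _lis_len([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6  # e.g. 2, 3, 7, 8, 10, 13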
| 626 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class a__ :
"""simple docstring"""
def __init__( self , lowercase ) -> int:
'''simple docstring'''
A__ = data
A__ = None
class a__ :
"""simple docstring"""
def __init__( self ) -> List[Any]:
'''simple docstring'''
A__ = None
A__ = None
def __iter__( self ) -> Iterator[Any]:
'''simple docstring'''
A__ = self.head
while self.head:
yield node.data
A__ = node.next
if node == self.head:
break
def __len__( self ) -> int:
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self ) -> List[Any]:
'''simple docstring'''
return "->".join(str(lowercase ) for item in iter(self ) )
def UpperCamelCase ( self , lowercase ) -> None:
'''simple docstring'''
self.insert_nth(len(self ) , lowercase )
def UpperCamelCase ( self , lowercase ) -> None:
'''simple docstring'''
self.insert_nth(0 , lowercase )
def UpperCamelCase ( self , lowercase , lowercase ) -> None:
'''simple docstring'''
if index < 0 or index > len(self ):
raise IndexError("list index out of range." )
A__ = Node(lowercase )
if self.head is None:
A__ = new_node # first node points itself
A__ = A__ = new_node
elif index == 0: # insert at head
A__ = self.head
A__ = A__ = new_node
else:
A__ = self.head
for _ in range(index - 1 ):
A__ = temp.next
A__ = temp.next
A__ = new_node
if index == len(self ) - 1: # insert at tail
A__ = new_node
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
return self.delete_nth(0 )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def UpperCamelCase ( self , lowercase = 0 ) -> Any:
'''simple docstring'''
if not 0 <= index < len(self ):
raise IndexError("list index out of range." )
A__ = self.head
if self.head == self.tail: # just one node
A__ = A__ = None
elif index == 0: # delete head node
A__ = self.tail.next.next
A__ = self.head.next
else:
A__ = self.head
for _ in range(index - 1 ):
A__ = temp.next
A__ = temp.next
A__ = temp.next.next
if index == len(self ) - 1: # delete at tail
A__ = temp
return delete_node.data
def UpperCamelCase ( self ) -> bool:
'''simple docstring'''
return len(self ) == 0
def lowerCAmelCase__ ( ) -> None:
'''simple docstring'''
A__ = CircularLinkedList()
    assert len(circular_linked_list ) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
        assert len(circular_linked_list ) == i
        circular_linked_list.insert_nth(i , i + 1 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    circular_linked_list.insert_tail(6 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 7 ) )
    circular_linked_list.insert_head(0 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(0 , 7 ) )
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.delete_nth(2 ) == 3
    circular_linked_list.insert_nth(2 , 3 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
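# Illustration only: the one-lap invariant encoded by __iter__ above -- start
# at the head, follow next pointers, stop when the head comes around again --
# shown with a plain list standing in for the ring.
_ring = [1, 2, 3]
_lap = [_ring[i % len(_ring)] for i in range(len(_ring))]
assert "->".join(str(x) for x in _lap) == "1->2->3"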
| 626 | 1 |
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
lowerCAmelCase__ = input("""Enter image url: """).strip()
print(f"""Downloading image from {url} ...""")
lowerCAmelCase__ = BeautifulSoup(requests.get(url).content, """html.parser""")
# The image URL is in the content field of the first meta tag with property og:image
lowerCAmelCase__ = soup.find("""meta""", {"""property""": """og:image"""})["""content"""]
lowerCAmelCase__ = requests.get(image_url).content
lowerCAmelCase__ = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, """wb""") as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
| 626 |
import math
lowerCAmelCase__ = 1_0
lowerCAmelCase__ = 7
lowerCAmelCase__ = BALLS_PER_COLOUR * NUM_COLOURS
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int = 2_0 ) -> str:
'''simple docstring'''
    A__ = math.comb(NUM_BALLS , SCREAMING_SNAKE_CASE_ )
A__ = math.comb(NUM_BALLS - BALLS_PER_COLOUR , SCREAMING_SNAKE_CASE_ )
A__ = NUM_COLOURS * (1 - missing_colour / total)
return F'{result:.9f}'
if __name__ == "__main__":
print(solution(2_0))
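# Illustration only: the closed form evaluated above -- with 7 colours of 1_0
# balls and 2_0 drawn, the expected number of distinct colours is
# 7 * (1 - C(60, 20) / C(70, 20)), a little under 7.
_expected = 7 * (1 - math.comb(6_0, 2_0) / math.comb(7_0, 2_0))
assert 6.8 < _expected < 6.9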
| 626 | 1 |
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple ) -> List[Any]:
'''simple docstring'''
A__ = 1
A__ = 2
while i * i <= n:
A__ = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def lowerCAmelCase__ ( ) -> Optional[Any]:
'''simple docstring'''
A__ = 1
A__ = 1
while True:
i += 1
t_num += i
if count_divisors(SCREAMING_SNAKE_CASE_ ) > 5_0_0:
break
return t_num
if __name__ == "__main__":
print(solution())
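# Illustration only: the divisor-count identity behind the factorisation
# above, d(n) = prod(e_i + 1). 28 = 2**2 * 7 has (2 + 1) * (1 + 1) = 6
# divisors: 1, 2, 4, 7, 14, 28.
def _d(n: int) -> int:
    count, p = 1, 2
    while p * p <= n:
        e = 0
        while n % p == 0:
            n //= p
            e += 1
        count *= e + 1
        p += 1
    return count * (2 if n > 1 else 1)
assert _d(2_8) == 6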
| 626 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: str = "cpu" , SCREAMING_SNAKE_CASE_: Union[str, None] = None ) -> None:
'''simple docstring'''
A__ = torch.load(SCREAMING_SNAKE_CASE_ , map_location=SCREAMING_SNAKE_CASE_ )
for k, v in tqdm(state_dict.items() ):
if not isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
A__ = v.half()
if save_path is None: # overwrite src_path
A__ = src_path
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
fire.Fire(convert)
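# Illustration only: the same fp16 cast applied to an in-memory state dict,
# without touching disk.
_sd = {"w": torch.ones(2, 2)}
_sd = {k: v.half() for k, v in _sd.items()}
assert _sd["w"].dtype == torch.float16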
| 626 | 1 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
lowerCAmelCase__ = get_logger(__name__)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: Union[str, Any] , SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: List[str]=0 ) -> Tuple:
'''simple docstring'''
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
A__ = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
A__ = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin'
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if accelerator.process_index == 0:
logger.info(F'Saving model to {output_model_file}' )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
A__ = (
F'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F'Saving model to {output_model_file}' )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , F'{MODEL_NAME}_{model_index}' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
logger.info(F'Saving model to {ckpt_dir}' )
A__ = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=SCREAMING_SNAKE_CASE_ , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , )
logger.info(F'Model saved to {ckpt_dir}' )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Union[str, Any]=0 ) -> Dict:
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(SCREAMING_SNAKE_CASE_ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
A__ = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin'
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F'Loading model from {input_model_file}' )
A__ = torch.load(SCREAMING_SNAKE_CASE_ )
logger.info(F'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
A__ = (
F'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F'Loading model from {input_model_file}' )
A__ = torch.load(SCREAMING_SNAKE_CASE_ )
logger.info(F'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
A__ = (
os.path.join(SCREAMING_SNAKE_CASE_ , F'{MODEL_NAME}_{model_index}' )
if F'{MODEL_NAME}' not in input_dir
else input_dir
)
logger.info(F'Loading model from {ckpt_dir}' )
A__ = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=SCREAMING_SNAKE_CASE_ , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , planner=DefaultLoadPlanner() , )
A__ = state_dict["model"]
logger.info(F'Model loaded from {ckpt_dir}' )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict , SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Union[str, Any] , SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Optional[int]=0 ) -> List[Any]:
'''simple docstring'''
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
A__ = FSDP.optim_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
A__ = (
F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F'Saving Optimizer state to {output_optimizer_file}' )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F'Optimizer state saved in {output_optimizer_file}' )
else:
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , F'{OPTIMIZER_NAME}_{optimizer_index}' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
logger.info(F'Saving Optimizer state to {ckpt_dir}' )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , )
logger.info(F'Optimizer state saved in {ckpt_dir}' )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[Any]=0 ) -> Tuple:
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
A__ = None
# below check should work but currently it isn't working (mostly opytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
A__ = (
F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
A__ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F'Loading Optimizer state from {input_optimizer_file}' )
A__ = torch.load(SCREAMING_SNAKE_CASE_ )
logger.info(F'Optimizer state loaded from {input_optimizer_file}' )
else:
A__ = (
os.path.join(SCREAMING_SNAKE_CASE_ , F'{OPTIMIZER_NAME}_{optimizer_index}' )
if F'{OPTIMIZER_NAME}' not in input_dir
else input_dir
)
logger.info(F'Loading Optimizer from {ckpt_dir}' )
A__ = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , )
A__ = optim_state["optimizer"]
logger.info(F'Optimizer loaded from {ckpt_dir}' )
A__ = FSDP.optim_state_dict_to_load(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
optimizer.load_state_dict(SCREAMING_SNAKE_CASE_ )
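# Illustration only (a sketch, assuming MODEL_NAME is "pytorch_model" as in
# accelerate's constants): the checkpoint file-name scheme used by the
# save/load helpers above, reproduced as a pure function.
def _ckpt_name(base: str, index: int, rank=None) -> str:
    name = base if index == 0 else F'{base}_{index}'
    return F'{name}_rank{rank}.bin' if rank is not None else F'{name}.bin'
assert _ckpt_name("pytorch_model", 0) == "pytorch_model.bin"
assert _ckpt_name("pytorch_model", 1, rank=2) == "pytorch_model_1_rank2.bin"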
| 626 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = field(
metadata={'help': 'The output directory where the model will be written.'} , )
__lowerCamelCase = field(
metadata={
'help': (
'The encoder model checkpoint for weights initialization.'
'Don\'t set if you want to train an encoder model from scratch.'
)
} , )
__lowerCamelCase = field(
metadata={
'help': (
'The decoder model checkpoint for weights initialization.'
'Don\'t set if you want to train a decoder model from scratch.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Pretrained encoder config name or path if not the same as encoder_model_name'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Pretrained decoder config name or path if not the same as decoder_model_name'} )
def lowerCAmelCase__ ( ) -> List[Any]:
'''simple docstring'''
A__ = HfArgumentParser((ModelArguments,) )
    ((model_args) , ) = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
A__ = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
A__ = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
A__ = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
A__ = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
A__ = True
A__ = True
A__ = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=SCREAMING_SNAKE_CASE_ , decoder_config=SCREAMING_SNAKE_CASE_ , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
A__ = decoder_config.decoder_start_token_id
A__ = decoder_config.pad_token_id
if decoder_start_token_id is None:
A__ = decoder_config.bos_token_id
if pad_token_id is None:
A__ = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
A__ = decoder_config.eos_token_id
A__ = decoder_start_token_id
A__ = pad_token_id
A__ = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
A__ = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
A__ = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
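# Illustration only: the token-id fallback applied above -- GPT2-style
# decoders expose only bos/eos, so decoder_start and pad fall back to them
# when the config leaves them unset. The 5_0_2_5_6 value is GPT-2's shared
# bos/eos id, used here purely as an example.
def _resolve(start, pad, bos, eos):
    return (start if start is not None else bos, pad if pad is not None else eos)
assert _resolve(None, None, bos=5_0_2_5_6, eos=5_0_2_5_6) == (5_0_2_5_6, 5_0_2_5_6)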
| 626 | 1 |
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: list ) -> list:
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE_ ) <= 1:
return lst
A__ = 1
while i < len(SCREAMING_SNAKE_CASE_ ):
if lst[i - 1] <= lst[i]:
i += 1
else:
            lst[i - 1] , lst[i] = lst[i], lst[i - 1]
i -= 1
if i == 0:
A__ = 1
return lst
if __name__ == "__main__":
lowerCAmelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(""",""")]
print(gnome_sort(unsorted))
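# Illustration only (a separate reference sketch): the gnome-sort walk above
# steps forward while neighbours are ordered and swaps/steps back otherwise.
def _gnome(a: list) -> list:
    i = 1
    while i < len(a):
        if i == 0 or a[i - 1] <= a[i]:
            i += 1
        else:
            a[i - 1], a[i] = a[i], a[i - 1]
            i -= 1
    return a
assert _gnome([5, 1, 4, 2, 8]) == [1, 2, 4, 5, 8]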
| 626 |
import collections
import importlib.util
import os
import re
from pathlib import Path
lowerCAmelCase__ = """src/transformers"""
# Matches is_xxx_available()
lowerCAmelCase__ = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase__ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase__ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCAmelCase__ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase__ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase__ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase__ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase__ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase__ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCAmelCase__ = re.compile(R"""^\s*try:""")
# Catches a line with else:
lowerCAmelCase__ = re.compile(R"""^\s*else:""")
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Any ) -> int:
'''simple docstring'''
if _re_test_backend.search(SCREAMING_SNAKE_CASE_ ) is None:
return None
A__ = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE_ )]
backends.sort()
return "_and_".join(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f:
A__ = f.readlines()
A__ = 0
    while line_index < len(lines ) and not lines[line_index].startswith("_import_structure = {" ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
return None
# First grab the objects without a specific backend in _import_structure
A__ = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
A__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            A__ = _re_one_line_import_struct.search(line ).groups()[0]
            A__ = re.findall(R"\[([^\]]+)\]" , content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", " )] )
            line_index += 1
            continue
        A__ = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            A__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(obj ) > 0]
            objects.extend(imports )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
A__ = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
A__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
A__ = lines[line_index]
            if _re_import_struct_add_one.search(line ) is not None:
                objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
            elif _re_import_struct_add_many.search(line ) is not None:
                A__ = _re_import_struct_add_many.search(line ).groups()[0].split(", " )
                A__ = [obj[1:-1] for obj in imports if len(obj ) > 0]
                objects.extend(imports )
            elif _re_between_brackets.search(line ) is not None:
                A__ = _re_between_brackets.search(line ).groups()[0].split(", " )
                A__ = [obj[1:-1] for obj in imports if len(obj ) > 0]
                objects.extend(imports )
            elif _re_quote_object.search(line ) is not None:
                objects.append(_re_quote_object.search(line ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 1_2 + "\"" ):
objects.append(line[1_3:-3] )
line_index += 1
A__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
A__ = []
while (
        line_index < len(lines )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
A__ = lines[line_index]
        A__ = _re_import.search(line )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
A__ = {"none": objects}
# Let's continue with backend-specific objects
    while line_index < len(lines ):
# If the line is an if is_backend_available, we grab all objects associated.
A__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
A__ = lines[line_index]
            A__ = _re_import.search(line )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 1_2 ):
objects.append(line[1_2:-2] )
line_index += 1
A__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict , SCREAMING_SNAKE_CASE_: List[Any] ) -> Optional[int]:
'''simple docstring'''
def find_duplicates(SCREAMING_SNAKE_CASE_: str ):
return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
A__ = []
for key in import_dict_objects.keys():
A__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
A__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
A__ = "base imports" if key == "none" else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def lowerCAmelCase__ ( ) -> Dict:
'''simple docstring'''
A__ = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            A__ = os.path.join(root , "__init__.py" )
            A__ = parse_init(fname )
            if objects is not None:
                A__ = analyze_results(*objects )
                if len(errors ) > 0:
                    A__ = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
                    failures.append("\n".join(errors ) )
    if len(failures ) > 0:
        raise ValueError("\n\n".join(failures ) )
def lowerCAmelCase__ ( ) -> Optional[Any]:
'''simple docstring'''
A__ = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
                directories.remove(folder )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob("*.py" ) ) ) == 0:
                continue
            A__ = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
A__ = short_path.replace(os.path.sep , "." )
submodules.append(SCREAMING_SNAKE_CASE_ )
for fname in files:
if fname == "__init__.py":
continue
            A__ = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
A__ = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(SCREAMING_SNAKE_CASE_ )
return submodules
lowerCAmelCase__ = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def lowerCAmelCase__ ( ) -> Optional[int]:
'''simple docstring'''
A__ = importlib.util.spec_from_file_location(
"transformers" , os.path.join(SCREAMING_SNAKE_CASE_ , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
A__ = spec.loader.load_module()
A__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
    if len(module_not_registered ) > 0:
A__ = "\n".join(F'- {module}' for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
F'{list_of_modules}\n'
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
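# Illustration only: the duplicate detection inside analyze_results above,
# applied to a plain list via collections.Counter.
assert [k for k, v in collections.Counter(["A", "B", "A"]).items() if v > 1] == ["A"]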
| 626 | 1 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = """Hello, World!"""
lowerCAmelCase__ = """en_XX"""
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: bool ) -> List[str]:
'''simple docstring'''
A__ = Path("data_bin" )
A__ = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(SCREAMING_SNAKE_CASE_ ).parent ) , checkpoint_file=Path(SCREAMING_SNAKE_CASE_ ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(SCREAMING_SNAKE_CASE_ ) , bpe="sentencepiece" , sentencepiece_model=str(Path(SCREAMING_SNAKE_CASE_ ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
xmod.eval() # disable dropout
print(SCREAMING_SNAKE_CASE_ )
A__ = xmod.model.encoder.sentence_encoder
A__ = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
A__ = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , SCREAMING_SNAKE_CASE_ )
A__ = XmodForSequenceClassification(SCREAMING_SNAKE_CASE_ ) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE_ )
model.eval()
# Now let's copy all the weights.
# Embeddings
A__ = xmod_sent_encoder.embed_tokens.weight
A__ = xmod_sent_encoder.embed_positions.weight
A__ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
A__ = xmod_sent_encoder.layernorm_embedding.weight
A__ = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
A__ = model.roberta.encoder.layer[i]
A__ = xmod_sent_encoder.layers[i]
# self attention
A__ = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
A__ = xmod_layer.self_attn.q_proj.weight
A__ = xmod_layer.self_attn.q_proj.bias
A__ = xmod_layer.self_attn.k_proj.weight
A__ = xmod_layer.self_attn.k_proj.bias
A__ = xmod_layer.self_attn.v_proj.weight
A__ = xmod_layer.self_attn.v_proj.bias
# self-attention output
A__ = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
A__ = xmod_layer.self_attn.out_proj.weight
A__ = xmod_layer.self_attn.out_proj.bias
A__ = xmod_layer.self_attn_layer_norm.weight
A__ = xmod_layer.self_attn_layer_norm.bias
# intermediate
A__ = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
A__ = xmod_layer.fca.weight
A__ = xmod_layer.fca.bias
# output
A__ = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
A__ = xmod_layer.fca.weight
A__ = xmod_layer.fca.bias
A__ = xmod_layer.final_layer_norm.weight
A__ = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
A__ = xmod_layer.adapter_layer_norm.weight
A__ = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
A__ = bert_output.adapter_modules[lang_code]
A__ = xmod_layer.adapter_modules[lang_code]
A__ = from_adapter.fca.weight
A__ = from_adapter.fca.bias
A__ = from_adapter.fca.weight
A__ = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
A__ = xmod_sent_encoder.layer_norm.weight
A__ = xmod_sent_encoder.layer_norm.bias
if classification_head:
A__ = xmod.model.classification_heads["mnli"].dense.weight
A__ = xmod.model.classification_heads["mnli"].dense.bias
A__ = xmod.model.classification_heads["mnli"].out_proj.weight
A__ = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
A__ = xmod.model.encoder.lm_head.dense.weight
A__ = xmod.model.encoder.lm_head.dense.bias
A__ = xmod.model.encoder.lm_head.layer_norm.weight
A__ = xmod.model.encoder.lm_head.layer_norm.bias
A__ = xmod.model.encoder.lm_head.weight
A__ = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
A__ = xmod.encode(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(SCREAMING_SNAKE_CASE_ )
A__ = model(SCREAMING_SNAKE_CASE_ )[0]
if classification_head:
A__ = xmod.model.classification_heads["mnli"](xmod.extract_features(SCREAMING_SNAKE_CASE_ ) )
else:
A__ = xmod.model(SCREAMING_SNAKE_CASE_ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
A__ = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
A__ = torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(parents=SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
lowerCAmelCase__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
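# Illustration only: the output-parity check performed at the end of the
# conversion above, shown on toy tensors with the same 1e-3 tolerance.
_a = torch.tensor([1.0, 2.0])
_b = torch.tensor([1.0, 2.0 + 5e-4])
assert torch.max(torch.abs(_a - _b)).item() < 1e-3 and torch.allclose(_a, _b, atol=1e-3)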
| 626 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class a__ ( snake_case ):
"""simple docstring"""
def __init__( self , *lowercase , **lowercase ) -> None:
'''simple docstring'''
warnings.warn(
"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use CLIPImageProcessor instead." , lowercase , )
super().__init__(*lowercase , **lowercase )
| 626 | 1 |
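The snippet above shows the standard deprecation-shim pattern: subclass the replacement, warn in `__init__`, and delegate everything else. A generic sketch of the same pattern (both class names are invented for illustration):

import warnings

class NewProcessor:
    def __init__(self, scale: float = 1.0) -> None:
        self.scale = scale

class OldProcessor(NewProcessor):
    # Deprecated alias kept only for backward compatibility.
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

old = OldProcessor(scale=2.0)  # emits the FutureWarning, then behaves like NewProcessor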
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCAmelCase__ = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCAmelCase__ = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCAmelCase__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: str ) -> tuple[str, float]:
'''simple docstring'''
A__ = len([g for position, g in enumerate(SCREAMING_SNAKE_CASE_ ) if g == main_target[position]] )
return (item, float(SCREAMING_SNAKE_CASE_ ))
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: str ) -> tuple[str, str]:
'''simple docstring'''
A__ = random.randint(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 )
A__ = parent_a[:random_slice] + parent_a[random_slice:]
A__ = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: list[str] ) -> str:
'''simple docstring'''
A__ = list(SCREAMING_SNAKE_CASE_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
A__ = random.choice(SCREAMING_SNAKE_CASE_ )
return "".join(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: tuple[str, float] , SCREAMING_SNAKE_CASE_: list[tuple[str, float]] , SCREAMING_SNAKE_CASE_: list[str] , ) -> list[str]:
'''simple docstring'''
A__ = []
# Generate more children proportionally to the fitness score.
A__ = int(parent_a[1] * 1_0_0 ) + 1
A__ = 1_0 if child_n >= 1_0 else child_n
for _ in range(SCREAMING_SNAKE_CASE_ ):
A__ = population_score[random.randint(0 , SCREAMING_SNAKE_CASE_ )][0]
A__ , A__ = crossover(parent_a[0] , SCREAMING_SNAKE_CASE_ )
# Append new string to the population list.
pop.append(mutate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
pop.append(mutate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
return pop
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: list[str] , SCREAMING_SNAKE_CASE_: bool = True ) -> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
A__ = F'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(SCREAMING_SNAKE_CASE_ )
    # Verify that the target contains no genes besides the ones inside the genes variable.
A__ = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
A__ = F'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(SCREAMING_SNAKE_CASE_ )
# Generate random starting population.
A__ = []
for _ in range(SCREAMING_SNAKE_CASE_ ):
population.append("".join([random.choice(SCREAMING_SNAKE_CASE_ ) for i in range(len(SCREAMING_SNAKE_CASE_ ) )] ) )
    # Just some logs to know what the algorithm is doing.
A__ , A__ = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(SCREAMING_SNAKE_CASE_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
A__ = [evaluate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for item in population]
# Check if there is a matching evolution.
A__ = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x[1] , reverse=SCREAMING_SNAKE_CASE_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 1_0 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
        # Keeping them avoids regression of the evolution.
A__ = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(SCREAMING_SNAKE_CASE_ )
# Normalize population score to be between 0 and 1.
A__ = [
(item, score / len(SCREAMING_SNAKE_CASE_ )) for item, score in population_score
]
# This is selection
for i in range(SCREAMING_SNAKE_CASE_ ):
population.extend(select(population_score[int(SCREAMING_SNAKE_CASE_ )] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(SCREAMING_SNAKE_CASE_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCAmelCase__ = (
"""This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
)
lowerCAmelCase__ = list(
""" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
"""nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"""
)
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 626 |
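The evolution loop above combines three mechanics: fitness scoring, single-point crossover, and per-position mutation. A standalone distillation of those mechanics on a toy target (this is a simplified sketch, not the exact functions above):

import random

random.seed(0)
TARGET = "hello"
GENES = "abcdefghijklmnopqrstuvwxyz "

def fitness(candidate: str) -> int:
    # Number of positions where the candidate already matches the target.
    return sum(c == t for c, t in zip(candidate, TARGET))

def crossover(a: str, b: str) -> str:
    cut = random.randint(0, len(a) - 1)
    return a[:cut] + b[cut:]

def mutate(s: str) -> str:
    i = random.randrange(len(s))
    return s[:i] + random.choice(GENES) + s[i + 1 :]

pop = ["".join(random.choice(GENES) for _ in TARGET) for _ in range(20)]
pop.sort(key=fitness, reverse=True)
child = mutate(crossover(pop[0], pop[1]))
print(fitness(pop[0]), fitness(child))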
from __future__ import annotations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: list[str] | None = None ) -> list[list[str]]:
'''simple docstring'''
A__ = word_bank or []
# create a table
A__ = len(SCREAMING_SNAKE_CASE_ ) + 1
A__ = []
for _ in range(SCREAMING_SNAKE_CASE_ ):
table.append([] )
# seed value
    A__ = [[]] # because the empty string has exactly one (empty) combination
# iterate through the indices
for i in range(SCREAMING_SNAKE_CASE_ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(SCREAMING_SNAKE_CASE_ )] == word:
A__ = [
[word, *way] for way in table[i]
]
                    # Add the word to every combination the current position holds,
                    # then push that combination to table[i + len(word)]
table[i + len(SCREAMING_SNAKE_CASE_ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(SCREAMING_SNAKE_CASE_ )]:
combination.reverse()
return table[len(SCREAMING_SNAKE_CASE_ )]
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
| 626 | 1 |
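The table above stores every decomposition explicitly; when only the number of decompositions is needed, the same recurrence runs with integer counts instead of lists. A sketch of that counting variant under the same semantics:

def count_construct(target: str, word_bank: list[str]) -> int:
    # table[i] = number of ways to build target[:i] from word_bank words.
    table = [0] * (len(target) + 1)
    table[0] = 1  # the empty prefix has exactly one construction
    for i in range(len(target)):
        if table[i]:
            for word in word_bank:
                if target[i : i + len(word)] == word:
                    table[i + len(word)] += table[i]
    return table[len(target)]

print(count_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))  # equals len(all_construct(...))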
from __future__ import annotations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: list[int] , SCREAMING_SNAKE_CASE_: list[int] , SCREAMING_SNAKE_CASE_: list[int] , SCREAMING_SNAKE_CASE_: list[list[str]] , SCREAMING_SNAKE_CASE_: int , ) -> None:
'''simple docstring'''
A__ = len(SCREAMING_SNAKE_CASE_ )
    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] )
return
    # We iterate over each column in the row to find all possible placements in this row
for col in range(SCREAMING_SNAKE_CASE_ ):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) does not already contain this column value, because if it
        # does there is a vertical collision. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective collision sets (diagonal_right_collisions,
        # diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we continue
        # to the next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # If all checks pass, we call the DFS function again with the updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int ) -> None:
'''simple docstring'''
A__ = []
depth_first_search([] , [] , [] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Print all the boards
for board in boards:
for column in board:
print(SCREAMING_SNAKE_CASE_ )
print("" )
print(len(SCREAMING_SNAKE_CASE_ ) , "solutions were found." )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 626 |
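A handy cross-check for the solver above is to validate each board independently: queens must occupy distinct columns and distinct diagonals, which is exactly the `row - col` / `row + col` trick from the comments. A small checker over the raw [1, 3, 0, 2]-style column lists (a sketch, not part of the original module):

def is_valid_placement(cols: list[int]) -> bool:
    # cols[row] is the column index of the queen placed in that row.
    n = len(cols)
    return (
        len(set(cols)) == n  # no shared columns
        and len({row - col for row, col in enumerate(cols)}) == n  # no shared "/" diagonals
        and len({row + col for row, col in enumerate(cols)}) == n  # no shared "\" diagonals
    )

print(is_valid_placement([1, 3, 0, 2]))  # True: a known 4-queens solution
print(is_valid_placement([0, 1, 2, 3]))  # False: all four queens share one diagonal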
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: str=1_0_2_4 ) -> Any:
'''simple docstring'''
A__ , A__ = [], []
A__ = list(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
A__ , A__ = sorted_examples[0]
def is_too_big(SCREAMING_SNAKE_CASE_: List[str] ):
return tok(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
A__ = new_src + " " + src
A__ = new_tgt + " " + tgt
        if is_too_big(SCREAMING_SNAKE_CASE_ ) or is_too_big(SCREAMING_SNAKE_CASE_ ): # can't fit, finalize example
finished_src.append(SCREAMING_SNAKE_CASE_ )
finished_tgt.append(SCREAMING_SNAKE_CASE_ )
A__ , A__ = src, tgt
else: # can fit, keep adding
A__ , A__ = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(SCREAMING_SNAKE_CASE_ )
finished_tgt.append(SCREAMING_SNAKE_CASE_ )
return finished_src, finished_tgt
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Path , SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: Tuple ) -> Union[str, Any]:
'''simple docstring'''
A__ = Path(SCREAMING_SNAKE_CASE_ )
save_path.mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
for split in ["train"]:
A__ , A__ = data_dir / F'{split}.source', data_dir / F'{split}.target'
A__ = [x.rstrip() for x in Path(SCREAMING_SNAKE_CASE_ ).open().readlines()]
A__ = [x.rstrip() for x in Path(SCREAMING_SNAKE_CASE_ ).open().readlines()]
A__ , A__ = pack_examples(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(F'packed {split} split from {len(SCREAMING_SNAKE_CASE_ )} examples -> {len(SCREAMING_SNAKE_CASE_ )}.' )
Path(save_path / F'{split}.source' ).open("w" ).write("\n".join(SCREAMING_SNAKE_CASE_ ) )
Path(save_path / F'{split}.target' ).open("w" ).write("\n".join(SCREAMING_SNAKE_CASE_ ) )
for split in ["val", "test"]:
A__ , A__ = data_dir / F'{split}.source', data_dir / F'{split}.target'
shutil.copyfile(SCREAMING_SNAKE_CASE_ , save_path / F'{split}.source' )
shutil.copyfile(SCREAMING_SNAKE_CASE_ , save_path / F'{split}.target' )
def lowerCAmelCase__ ( ) -> Optional[int]:
'''simple docstring'''
A__ = argparse.ArgumentParser()
parser.add_argument("--tok_name" , type=SCREAMING_SNAKE_CASE_ , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("--max_seq_len" , type=SCREAMING_SNAKE_CASE_ , default=1_2_8 )
parser.add_argument("--data_dir" , type=SCREAMING_SNAKE_CASE_ )
parser.add_argument("--save_path" , type=SCREAMING_SNAKE_CASE_ )
A__ = parser.parse_args()
A__ = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(SCREAMING_SNAKE_CASE_ , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 626 | 1 |
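The packing loop above only needs a way to measure length; approximating token counts by whitespace splitting is enough to see the greedy merge-until-too-big behavior. A self-contained sketch of the same logic (whitespace counting is an illustrative stand-in for the real tokenizer):

def pack_pairs(src: list[str], tgt: list[str], max_tokens: int = 6) -> tuple[list[str], list[str]]:
    # Greedily merge consecutive (src, tgt) pairs while both sides stay under max_tokens.
    def is_too_big(text: str) -> bool:
        return len(text.split()) > max_tokens

    finished_src, finished_tgt = [], []
    new_src, new_tgt = src[0], tgt[0]
    for s, t in zip(src[1:], tgt[1:]):
        cand_src, cand_tgt = new_src + " " + s, new_tgt + " " + t
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # can't fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = s, t
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    finished_src.append(new_src)
    finished_tgt.append(new_tgt)
    return finished_src, finished_tgt

print(pack_pairs(["a b", "c d e", "f"] * 3, ["x", "y z", "w"] * 3))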
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
__lowerCamelCase = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
__lowerCamelCase = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'A csv or a json file containing the training data.'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'A csv or a json file containing the validation data.'} )
__lowerCamelCase = field(default=snake_case , metadata={'help': 'A csv or a json file containing the test data.'} )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("Need either a GLUE task, a training/validation file or a dataset name." )
else:
A__ = self.train_file.split("." )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
A__ = self.validation_file.split("." )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
__lowerCamelCase = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def lowerCAmelCase__ ( ) -> Optional[int]:
'''simple docstring'''
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__ , A__ , A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__ , A__ , A__ = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
A__ = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE_ )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
A__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
A__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
A__ = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
A__ = data_args.train_file.split("." )[-1]
A__ = data_args.test_file.split("." )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
A__ = data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`." )
for key in data_files.keys():
logger.info(F'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith(".csv" ):
# Loading a dataset from local csv files
A__ = load_dataset("csv" , data_files=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
A__ = load_dataset("json" , data_files=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
A__ = raw_datasets["train"].features["label"].names
A__ = len(SCREAMING_SNAKE_CASE_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
A__ = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=SCREAMING_SNAKE_CASE_ , )
A__ = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
A__ = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
A__ = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
A__ = {"Refused": 0, "Entailed": 1}
A__ = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
A__ = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(SCREAMING_SNAKE_CASE_: int ):
# Tokenize the texts
def _convert_table_text_to_pandas(SCREAMING_SNAKE_CASE_: str ):
A__ = [_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )]
A__ = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
A__ = examples["statement"]
A__ = list(map(_convert_table_text_to_pandas , examples["table_text"] ) )
A__ = tokenizer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )
A__ = examples["label"]
return result
with training_args.main_process_first(desc="dataset map pre-processing" ):
A__ = raw_datasets.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on dataset" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
A__ = raw_datasets["train"]
if data_args.max_train_samples is not None:
A__ = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
A__ = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
A__ = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset" )
A__ = raw_datasets["test"]
if data_args.max_predict_samples is not None:
A__ = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(SCREAMING_SNAKE_CASE_ ) ) , 3 ):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(SCREAMING_SNAKE_CASE_: EvalPrediction ):
A__ = p.predictions[0] if isinstance(p.predictions , SCREAMING_SNAKE_CASE_ ) else p.predictions
A__ = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
A__ = default_data_collator
elif training_args.fpaa:
A__ = DataCollatorWithPadding(SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=8 )
else:
A__ = None
# Initialize our Trainer
A__ = Trainer(
model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , )
# Training
if training_args.do_train:
A__ = None
if training_args.resume_from_checkpoint is not None:
A__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A__ = last_checkpoint
A__ = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ )
A__ = train_result.metrics
A__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE_ )
)
A__ = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics("train" , SCREAMING_SNAKE_CASE_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
A__ = trainer.evaluate(eval_dataset=SCREAMING_SNAKE_CASE_ )
A__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE_ )
A__ = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
trainer.log_metrics("eval" , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics("eval" , SCREAMING_SNAKE_CASE_ )
if training_args.do_predict:
logger.info("*** Predict ***" )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
A__ = predict_dataset.remove_columns("label" )
A__ = trainer.predict(SCREAMING_SNAKE_CASE_ , metric_key_prefix="predict" ).predictions
A__ = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 )
A__ = os.path.join(training_args.output_dir , "predict_results_tabfact.txt" )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE_ , "w" ) as writer:
logger.info("***** Predict Results *****" )
writer.write("index\tprediction\n" )
for index, item in enumerate(SCREAMING_SNAKE_CASE_ ):
A__ = label_list[item]
writer.write(F'{index}\t{item}\n' )
A__ = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE_ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int ) -> int:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 626 |
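The inner `_convert_table_text_to_pandas` helper above is what turns TabFact's flat table encoding into a DataFrame for the TAPEX tokenizer. It is easy to exercise in isolation; the sample string below is invented in the dataset's '#'-separated, newline-delimited format:

import pandas as pd

def table_text_to_pandas(table_text: str) -> pd.DataFrame:
    # Rows are newline-delimited, cells are '#'-delimited; row 0 is the header.
    rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])

sample = "year#city\n2008#beijing\n2012#london\n"
print(table_text_to_pandas(sample))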
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Namespace ) -> Tuple:
'''simple docstring'''
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
lowerCAmelCase__ = """
transformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class a__ ( snake_case ):
"""simple docstring"""
@staticmethod
def UpperCamelCase ( lowercase ) -> Optional[int]:
'''simple docstring'''
A__ = parser.add_parser(
"convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
train_parser.add_argument("--model_type" , type=lowercase , required=lowercase , help="Model's type." )
train_parser.add_argument(
"--tf_checkpoint" , type=lowercase , required=lowercase , help="TensorFlow checkpoint path or folder." )
train_parser.add_argument(
"--pytorch_dump_output" , type=lowercase , required=lowercase , help="Path to the PyTorch saved model output." )
train_parser.add_argument("--config" , type=lowercase , default="" , help="Configuration file path or folder." )
train_parser.add_argument(
"--finetuning_task_name" , type=lowercase , default=lowercase , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
train_parser.set_defaults(func=lowercase )
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase , ) -> Union[str, Any]:
'''simple docstring'''
A__ = logging.get_logger("transformers-cli/converting" )
self._logger.info(F'Loading model {model_type}' )
A__ = model_type
A__ = tf_checkpoint
A__ = pytorch_dump_output
A__ = config
A__ = finetuning_task_name
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowercase )
if "ckpt" in self._tf_checkpoint.lower():
A__ = self._tf_checkpoint
A__ = ""
else:
A__ = self._tf_checkpoint
A__ = ""
convert_transfo_xl_checkpoint_to_pytorch(
lowercase , self._config , self._pytorch_dump_output , lowercase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowercase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowercase )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 626 | 1 |
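The long `elif` chain above leans on deferred imports so that heavy optional dependencies (TensorFlow, in this case) are only required for the branch actually taken. The pattern in isolation (`some_package.bert_conversion` is a placeholder path, not a real transformers module):

def run_conversion(model_type: str, checkpoint: str, output: str) -> None:
    if model_type == "bert":
        try:
            # Deferred import: the optional dependency is only needed when
            # this branch is actually taken.
            from some_package.bert_conversion import convert  # placeholder module path
        except ImportError as err:
            raise ImportError("Install the optional dependency for BERT conversion.") from err
        convert(checkpoint, output)
    else:
        raise ValueError(f"Unsupported model type: {model_type}")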
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
lowerCAmelCase__ = datasets.utils.logging.get_logger(__name__)
@dataclass
class a__ ( datasets.BuilderConfig ):
"""simple docstring"""
__lowerCamelCase = 10000
__lowerCamelCase = None
__lowerCamelCase = None
class a__ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
__lowerCamelCase = ParquetConfig
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def UpperCamelCase ( self , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
A__ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowercase , (str, list, tuple) ):
A__ = data_files
if isinstance(lowercase , lowercase ):
A__ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A__ = [dl_manager.iter_files(lowercase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
A__ = []
for split_name, files in data_files.items():
if isinstance(lowercase , lowercase ):
A__ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A__ = [dl_manager.iter_files(lowercase ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(lowercase ):
with open(lowercase , "rb" ) as f:
A__ = datasets.Features.from_arrow_schema(pq.read_schema(lowercase ) )
break
splits.append(datasets.SplitGenerator(name=lowercase , gen_kwargs={"files": files} ) )
return splits
def UpperCamelCase ( self , lowercase ) -> pa.Table:
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
A__ = table_cast(lowercase , self.info.features.arrow_schema )
return pa_table
def UpperCamelCase ( self , lowercase ) -> Optional[int]:
'''simple docstring'''
A__ = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
for file_idx, file in enumerate(itertools.chain.from_iterable(lowercase ) ):
with open(lowercase , "rb" ) as f:
A__ = pq.ParquetFile(lowercase )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
A__ = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F'{file_idx}_{batch_idx}', self._cast_table(lowercase )
except ValueError as e:
logger.error(F'Failed to read file \'{file}\' with error {type(lowercase )}: {e}' )
raise
| 626 |
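The `_generate_tables` method above streams each Parquet file batch by batch rather than loading it whole. A minimal standalone sketch of that pyarrow pattern, writing a small temporary file first so it runs on its own:

import tempfile
import pyarrow as pa
import pyarrow.parquet as pq

# Write a tiny file so the example is self-contained.
path = tempfile.mktemp(suffix=".parquet")
pq.write_table(pa.table({"a": list(range(10)), "b": ["x"] * 10}), path)

parquet_file = pq.ParquetFile(path)
for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=4, columns=["a"])):
    pa_table = pa.Table.from_batches([record_batch])
    print(batch_idx, pa_table.num_rows)  # prints 0 4, 1 4, 2 2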
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class a__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , lowercase=True , lowercase=1 / 255 , lowercase=True , ) -> Union[str, Any]:
'''simple docstring'''
A__ = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_normalize
A__ = image_mean
A__ = image_std
A__ = do_rescale
A__ = rescale_factor
A__ = do_pad
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase ( self , lowercase , lowercase=False ) -> int:
'''simple docstring'''
if not batched:
A__ = image_inputs[0]
if isinstance(lowercase , Image.Image ):
A__ , A__ = image.size
else:
A__ , A__ = image.shape[1], image.shape[2]
if w < h:
A__ = int(self.size["shortest_edge"] * h / w )
A__ = self.size["shortest_edge"]
elif w > h:
A__ = self.size["shortest_edge"]
A__ = int(self.size["shortest_edge"] * w / h )
else:
A__ = self.size["shortest_edge"]
A__ = self.size["shortest_edge"]
else:
A__ = []
for image in image_inputs:
A__ , A__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A__ = max(lowercase , key=lambda lowercase : item[0] )[0]
A__ = max(lowercase , key=lambda lowercase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class a__ ( snake_case , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = DetaImageProcessor if is_vision_available() else None
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
A__ = DetaImageProcessingTester(self )
@property
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , "image_mean" ) )
self.assertTrue(hasattr(lowercase , "image_std" ) )
self.assertTrue(hasattr(lowercase , "do_normalize" ) )
self.assertTrue(hasattr(lowercase , "do_resize" ) )
self.assertTrue(hasattr(lowercase , "do_rescale" ) )
self.assertTrue(hasattr(lowercase , "do_pad" ) )
self.assertTrue(hasattr(lowercase , "size" ) )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , lowercase )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
A__ = json.loads(f.read() )
A__ = {"image_id": 39769, "annotations": target}
# encode them
A__ = DetaImageProcessor()
A__ = image_processing(images=lowercase , annotations=lowercase , return_tensors="pt" )
# verify pixel values
A__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowercase )
A__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowercase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowercase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowercase )
A__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowercase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowercase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowercase ) )
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowercase ) )
# verify orig_size
A__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowercase ) )
# verify size
A__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowercase ) )
@slow
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
A__ = json.loads(f.read() )
A__ = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
A__ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
A__ = DetaImageProcessor(format="coco_panoptic" )
A__ = image_processing(images=lowercase , annotations=lowercase , masks_path=lowercase , return_tensors="pt" )
# verify pixel values
A__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowercase )
A__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowercase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowercase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowercase )
A__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowercase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowercase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowercase ) )
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowercase ) )
# verify masks
A__ = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowercase )
# verify orig_size
A__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowercase ) )
# verify size
A__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowercase ) )
| 626 | 1 |
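`get_expected_values` above implements the usual shortest-edge resize rule: scale the image so the shorter side hits the target while preserving aspect ratio. The arithmetic on its own, as a sketch that ignores the `longest_edge` cap of 1333 the full processor also enforces:

def shortest_edge_resize(width: int, height: int, shortest_edge: int = 18) -> tuple[int, int]:
    # Scale so the shorter side equals `shortest_edge`, preserving aspect ratio.
    if width < height:
        return shortest_edge, int(shortest_edge * height / width)
    if width > height:
        return int(shortest_edge * width / height), shortest_edge
    return shortest_edge, shortest_edge

print(shortest_edge_resize(30, 400))  # (18, 240)
print(shortest_edge_resize(400, 30))  # (240, 18)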
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCAmelCase__ = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCAmelCase__ = """main"""
# Default branch name
lowerCAmelCase__ = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)
lowerCAmelCase__ = """aaaaaaa"""
# This commit does not exist, so we should 404.
lowerCAmelCase__ = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCAmelCase__ = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""
@contextlib.contextmanager
def lowerCAmelCase__ ( ) -> int:
'''simple docstring'''
print("Welcome!" )
yield
print("Bye!" )
@contextlib.contextmanager
def lowerCAmelCase__ ( ) -> int:
'''simple docstring'''
print("Bonjour!" )
yield
print("Au revoir!" )
class a__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
assert transformers.__spec__ is not None
assert importlib.util.find_spec("transformers" ) is not None
class a__ ( unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def UpperCamelCase ( self , lowercase ) -> List[str]:
'''simple docstring'''
with ContextManagers([] ):
print("Transformers are awesome!" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , "Transformers are awesome!\n" )
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def UpperCamelCase ( self , lowercase ) -> Union[str, Any]:
'''simple docstring'''
with ContextManagers([context_en()] ):
print("Transformers are awesome!" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Welcome!\nTransformers are awesome!\nBye!\n" )
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def UpperCamelCase ( self , lowercase ) -> Tuple:
'''simple docstring'''
with ContextManagers([context_fr(), context_en()] ):
print("Transformers are awesome!" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n" )
@require_torch
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(find_labels(lowercase ) , ["labels"] )
self.assertEqual(find_labels(lowercase ) , ["labels", "next_sentence_label"] )
self.assertEqual(find_labels(lowercase ) , ["start_positions", "end_positions"] )
class a__ ( snake_case ):
"""simple docstring"""
pass
self.assertEqual(find_labels(lowercase ) , ["labels"] )
@require_tf
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
self.assertEqual(find_labels(lowercase ) , ["labels"] )
self.assertEqual(find_labels(lowercase ) , ["labels", "next_sentence_label"] )
self.assertEqual(find_labels(lowercase ) , ["start_positions", "end_positions"] )
class a__ ( snake_case ):
"""simple docstring"""
pass
self.assertEqual(find_labels(lowercase ) , ["labels"] )
@require_flax
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(find_labels(lowercase ) , [] )
self.assertEqual(find_labels(lowercase ) , [] )
self.assertEqual(find_labels(lowercase ) , [] )
class a__ ( snake_case ):
"""simple docstring"""
pass
self.assertEqual(find_labels(lowercase ) , [] )
| 626 |
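`ContextManagers`, exercised in the tests above, amounts to entering several context managers as one, which the standard library expresses with `contextlib.ExitStack`. A sketch of the equivalent pattern (the `tagged` helper is invented for illustration):

from contextlib import ExitStack, contextmanager

@contextmanager
def tagged(name: str):
    print(f"enter {name}")
    yield
    print(f"exit {name}")

with ExitStack() as stack:
    for name in ("fr", "en"):
        stack.enter_context(tagged(name))
    print("Transformers are awesome!")
# Contexts are exited in reverse order: "exit en" prints before "exit fr".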
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class a__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
A__ = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !
A__ = model(lowercase )["last_hidden_state"]
A__ = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , lowercase )
# compare the actual values for a slice.
A__ = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 626 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 'naver-clova-ix/donut-base-finetuned-docvqa'
__lowerCamelCase = (
        'This is a tool that answers a question about a document (pdf). It takes an input named `document` which '
'should be the document containing the information, as well as a `question` that is the question about the '
'document. It returns a text that contains the answer to the question.'
)
__lowerCamelCase = 'document_qa'
__lowerCamelCase = AutoProcessor
__lowerCamelCase = VisionEncoderDecoderModel
__lowerCamelCase = ['image', 'text']
__lowerCamelCase = ['text']
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
'''simple docstring'''
if not is_vision_available():
raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )
super().__init__(*lowercase , **lowercase )
def UpperCamelCase ( self , lowercase , lowercase ) -> List[str]:
'''simple docstring'''
A__ = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
A__ = task_prompt.replace("{user_input}" , lowercase )
A__ = self.pre_processor.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors="pt" ).input_ids
A__ = self.pre_processor(lowercase , return_tensors="pt" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def UpperCamelCase ( self , lowercase ) -> Any:
'''simple docstring'''
return self.model.generate(
inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=lowercase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=lowercase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=lowercase , ).sequences
def UpperCamelCase ( self , lowercase ) -> Any:
'''simple docstring'''
A__ = self.pre_processor.batch_decode(lowercase )[0]
A__ = sequence.replace(self.pre_processor.tokenizer.eos_token , "" )
A__ = sequence.replace(self.pre_processor.tokenizer.pad_token , "" )
A__ = re.sub(R"<.*?>" , "" , lowercase , count=1 ).strip() # remove first task start token
A__ = self.pre_processor.tokenajson(lowercase )
return sequence["answer"]
| 626 |
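The decoding step above strips special tokens and then removes only the first task-start token with `re.sub(..., count=1)` before parsing. That regex step in isolation (the sample sequence is invented to mimic a Donut DocVQA output):

import re

sequence = "<s_docvqa><s_question>what is the date?</s_question><s_answer>2016-03-01</s_answer>"
# Drop only the first task-start token, as in the tool above.
stripped = re.sub(r"<.*?>", "", sequence, count=1).strip()
print(stripped)  # "<s_question>what is the date?</s_question><s_answer>2016-03-01</s_answer>"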
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a__ ( snake_case , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = DanceDiffusionPipeline
__lowerCamelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__lowerCamelCase = PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
__lowerCamelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__lowerCamelCase = False
__lowerCamelCase = False
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
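# A deliberately small 1D UNet over stereo audio (2 in/out channels) with a
# Fourier time embedding, so this unconditional test stays fast on CPU.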
A__ = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowercase , use_timestep_embedding=lowercase , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , )
A__ = IPNDMScheduler()
A__ = {
"unet": unet,
"scheduler": scheduler,
}
return components
def UpperCamelCase ( self , lowercase , lowercase=0 ) -> Union[str, Any]:
'''simple docstring'''
if str(lowercase ).startswith("mps" ):
A__ = torch.manual_seed(lowercase )
else:
A__ = torch.Generator(device=lowercase ).manual_seed(lowercase )
A__ = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 4,
}
return inputs
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = "cpu" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = DanceDiffusionPipeline(**lowercase )
A__ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A__ = self.get_dummy_inputs(lowercase )
A__ = pipe(**lowercase )
A__ = output.audios
A__ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
A__ = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = torch_device
A__ = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
A__ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A__ = torch.manual_seed(0 )
A__ = pipe(generator=lowercase , num_inference_steps=100 , audio_length_in_s=4.096 )
A__ = output.audios
A__ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
A__ = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
A__ = torch_device
A__ = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa )
A__ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A__ = torch.manual_seed(0 )
A__ = pipe(generator=lowercase , num_inference_steps=100 , audio_length_in_s=4.096 )
A__ = output.audios
A__ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
A__ = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 626 | 1 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 626 |
from __future__ import annotations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: list[int] , SCREAMING_SNAKE_CASE_: list[int] , SCREAMING_SNAKE_CASE_: list[int] , SCREAMING_SNAKE_CASE_: list[list[str]] , SCREAMING_SNAKE_CASE_: int , ) -> None:
'''simple docstring'''
A__ = len(SCREAMING_SNAKE_CASE_ )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(SCREAMING_SNAKE_CASE_ ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
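# For example, queens at (row=1, col=3) and (row=3, col=1) collide on the
# 135º diagonal because 1 + 3 == 3 + 1.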
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int ) -> None:
'''simple docstring'''
A__ = []
depth_first_search([] , [] , [] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Print all the boards
for board in boards:
for column in board:
print(SCREAMING_SNAKE_CASE_ )
print("" )
print(len(SCREAMING_SNAKE_CASE_ ) , "solutions were found." )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 626 | 1 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> List[Any]:
'''simple docstring'''
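# Count only the trainable parameters, i.e. those with requires_grad=True.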
A__ = filter(lambda p : p.requires_grad , model.parameters() )
A__ = sum([np.prod(p.size() ) for p in model_parameters] )
return params
lowerCAmelCase__ = logging.getLogger(__name__)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
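# Map the monitored metric to a checkpoint filename template understood by
# pytorch_lightning's ModelCheckpoint.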
if metric == "rouge2":
A__ = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
A__ = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
A__ = "{val_avg_em:.4f}-{step_count}"
elif metric == "loss":
A__ = "{val_avg_loss:.4f}-{step_count}"
else:
raise NotImplementedError(
F'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
" function." )
A__ = ModelCheckpoint(
dirpath=SCREAMING_SNAKE_CASE_ , filename=SCREAMING_SNAKE_CASE_ , monitor=F'val_{metric}' , mode="max" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: Tuple ) -> List[Any]:
'''simple docstring'''
return EarlyStopping(
monitor=F'val_{metric}' , mode="min" if "loss" in metric else "max" , patience=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , )
class a__ ( pl.Callback ):
"""simple docstring"""
def UpperCamelCase ( self , lowercase , lowercase ) -> List[str]:
'''simple docstring'''
A__ = {F'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lowercase )
@rank_zero_only
def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase=True ) -> None:
'''simple docstring'''
logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' )
A__ = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
A__ = Path(pl_module.hparams.output_dir )
if type_path == "test":
A__ = od / "test_results.txt"
A__ = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
A__ = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
A__ = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=lowercase )
generations_file.parent.mkdir(exist_ok=lowercase )
with open(lowercase , "a+" ) as writer:
for key in sorted(lowercase ):
if key in ["log", "progress_bar", "preds"]:
continue
A__ = metrics[key]
if isinstance(lowercase , torch.Tensor ):
A__ = val.item()
A__ = F'{key}: {val:.6f}\n'
writer.write(lowercase )
if not save_generations:
return
if "preds" in metrics:
A__ = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(lowercase )
@rank_zero_only
def UpperCamelCase ( self , lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
try:
A__ = pl_module.model.model.num_parameters()
except AttributeError:
A__ = pl_module.model.num_parameters()
A__ = count_trainable_parameters(lowercase )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
def UpperCamelCase ( self , lowercase , lowercase ) -> int:
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(lowercase , lowercase , "test" )
@rank_zero_only
def UpperCamelCase ( self , lowercase , lowercase ) -> str:
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 626 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 'new-model'
if is_tf_available():
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = NewModelConfig
@require_tf
class a__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
A__ = "bert-base-cased"
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
A__ = "bert-base-cased"
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelForPreTraining.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelForCausalLM.from_pretrained(lowercase )
A__ , A__ = TFAutoModelForCausalLM.from_pretrained(lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelWithLMHead.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelForMaskedLM.from_pretrained(lowercase )
A__ , A__ = TFAutoModelForMaskedLM.from_pretrained(lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase )
A__ , A__ = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelForSequenceClassification.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelForQuestionAnswering.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
@require_tensorflow_probability
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
A__ = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A__ = TFAutoModelForTableQuestionAnswering.from_pretrained(lowercase )
A__ , A__ = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = TFAutoModelWithLMHead.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = TFAutoModelWithLMHead.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(lowercase , lowercase )
A__ = copy.deepcopy(model.config )
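# Overriding the `architectures` field makes the auto API resolve the same
# config to TFFunnelBaseModel instead of TFFunnelModel.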
A__ = ["FunnelBaseModel"]
A__ = TFAutoModel.from_config(lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase )
A__ = TFAutoModel.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
try:
AutoConfig.register("new-model" , lowercase )
A__ = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(lowercase ):
auto_class.register(lowercase , lowercase )
auto_class.register(lowercase , lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
auto_class.register(lowercase , lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
A__ = BertModelTester(self ).get_config()
A__ = NewModelConfig(**tiny_config.to_dict() )
A__ = auto_class.from_config(lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase )
A__ = auto_class.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase , "bert-base is not a local folder and is not a valid model identifier" ):
A__ = TFAutoModel.from_pretrained("bert-base" )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
A__ = TFAutoModel.from_pretrained(lowercase , revision="aaaaaa" )
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
A__ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaisesRegex(lowercase , "Use `from_pt=True` to load this model" ):
A__ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
A__ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
A__ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
A__ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 626 | 1 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase__ = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase__ = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase__ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class a__ :
"""simple docstring"""
__lowerCamelCase = 200
__lowerCamelCase = {'Content-Length': '100'}
__lowerCamelCase = {}
def UpperCamelCase ( self , **lowercase ) -> int:
'''simple docstring'''
return [bytes(lowercase , "utf-8" )]
def lowerCAmelCase__ ( *SCREAMING_SNAKE_CASE_: Any , **SCREAMING_SNAKE_CASE_: Any ) -> int:
'''simple docstring'''
return MockResponse()
@pytest.mark.parametrize("urls_type" , [str, list, dict] )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Optional[Any] ) -> Any:
'''simple docstring'''
import requests
monkeypatch.setattr(SCREAMING_SNAKE_CASE_ , "request" , SCREAMING_SNAKE_CASE_ )
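# Build the URL container matching the parametrized type (str, list or dict)
# so download() is exercised for every supported input shape.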
A__ = URL
if issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
A__ = url
elif issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
A__ = [url]
elif issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
A__ = {"train": url}
A__ = "dummy"
A__ = "downloads"
A__ = tmp_path
A__ = DownloadConfig(
cache_dir=os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , use_etag=SCREAMING_SNAKE_CASE_ , )
A__ = DownloadManager(dataset_name=SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ )
A__ = dl_manager.download(SCREAMING_SNAKE_CASE_ )
A__ = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
A__ = [downloaded_paths]
A__ = [urls]
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
assert "train" in downloaded_paths.keys()
A__ = downloaded_paths.values()
A__ = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
A__ = Path(SCREAMING_SNAKE_CASE_ )
A__ = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
A__ = downloaded_path.read_text()
assert content == CONTENT
A__ = downloaded_path.with_suffix(".json" )
assert metadata_downloaded_path.exists()
A__ = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type" , [str, list, dict] )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] , SCREAMING_SNAKE_CASE_: Union[str, Any] , SCREAMING_SNAKE_CASE_: Optional[Any] ) -> str:
'''simple docstring'''
A__ = str(SCREAMING_SNAKE_CASE_ )
if issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
A__ = filename
elif issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
A__ = [filename]
elif issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
A__ = {"train": filename}
A__ = "dummy"
A__ = xz_file.parent
A__ = "extracted"
A__ = DownloadConfig(
cache_dir=SCREAMING_SNAKE_CASE_ , use_etag=SCREAMING_SNAKE_CASE_ , )
A__ = DownloadManager(dataset_name=SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ )
A__ = dl_manager.extract(SCREAMING_SNAKE_CASE_ )
A__ = paths
for extracted_paths in [extracted_paths]:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
A__ = [extracted_paths]
A__ = [paths]
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
assert "train" in extracted_paths.keys()
A__ = extracted_paths.values()
A__ = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
A__ = Path(SCREAMING_SNAKE_CASE_ )
A__ = extracted_path.parts
assert parts[-1] == hash_url_to_filename(SCREAMING_SNAKE_CASE_ , etag=SCREAMING_SNAKE_CASE_ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
A__ = extracted_path.read_text()
A__ = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: List[Any] ) -> int:
'''simple docstring'''
assert path.endswith(".jsonl" )
for num_items, line in enumerate(SCREAMING_SNAKE_CASE_ , start=1 ):
A__ = json.loads(line.decode("utf-8" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("archive_jsonl" , ["tar_jsonl_path", "zip_jsonl_path"] )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
A__ = request.getfixturevalue(SCREAMING_SNAKE_CASE_ )
A__ = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(SCREAMING_SNAKE_CASE_ ) , start=1 ):
_test_jsonl(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl" , ["tar_nested_jsonl_path", "zip_nested_jsonl_path"] )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[int] ) -> Tuple:
'''simple docstring'''
A__ = request.getfixturevalue(SCREAMING_SNAKE_CASE_ )
A__ = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(SCREAMING_SNAKE_CASE_ ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(SCREAMING_SNAKE_CASE_ ) , start=1 ):
_test_jsonl(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert num_tar == 1
assert num_jsonl == 2
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> Any:
'''simple docstring'''
A__ = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(SCREAMING_SNAKE_CASE_ ) , start=1 ):
assert os.path.basename(SCREAMING_SNAKE_CASE_ ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 626 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCAmelCase__ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCAmelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCAmelCase__ = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
lowerCAmelCase__ = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ = None
# source code of `config_class`
A__ = inspect.getsource(SCREAMING_SNAKE_CASE_ )
A__ = _re_checkpoint.findall(SCREAMING_SNAKE_CASE_ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/" ):
A__ = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
A__ = F'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
A__ = ckpt_name
break
return checkpoint
def lowerCAmelCase__ ( ) -> List[str]:
'''simple docstring'''
A__ = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
A__ = get_checkpoint_from_config_class(SCREAMING_SNAKE_CASE_ )
A__ = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
A__ = "\n".join(sorted(SCREAMING_SNAKE_CASE_ ) )
raise ValueError(F'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 626 | 1 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase__ = 2_5_6
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = ['melgan']
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> None:
'''simple docstring'''
super().__init__()
# From MELGAN
A__ = math.log(1e-5 ) # Matches MelGAN training.
A__ = 4.0 # Largest value for most examples
A__ = 128
self.register_modules(
notes_encoder=lowercase , continuous_encoder=lowercase , decoder=lowercase , scheduler=lowercase , melgan=lowercase , )
def UpperCamelCase ( self , lowercase , lowercase=(-1.0, 1.0) , lowercase=False ) -> str:
'''simple docstring'''
A__ , A__ = output_range
if clip:
A__ = torch.clip(lowercase , self.min_value , self.max_value )
# Scale to [0, 1].
A__ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def UpperCamelCase ( self , lowercase , lowercase=(-1.0, 1.0) , lowercase=False ) -> Optional[int]:
'''simple docstring'''
A__ , A__ = input_range
A__ = torch.clip(lowercase , lowercase , lowercase ) if clip else outputs
# Scale to [0, 1].
A__ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> Any:
'''simple docstring'''
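# Positions with token id > 0 are treated as real content; zeros are masked
# out of the notes encoder's attention.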
A__ = input_tokens > 0
A__ , A__ = self.notes_encoder(
encoder_input_tokens=lowercase , encoder_inputs_mask=lowercase )
A__ , A__ = self.continuous_encoder(
encoder_inputs=lowercase , encoder_inputs_mask=lowercase )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
A__ = noise_time
if not torch.is_tensor(lowercase ):
A__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(lowercase ) and len(timesteps.shape ) == 0:
A__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
A__ = self.decoder(
encodings_and_masks=lowercase , decoder_input_tokens=lowercase , decoder_noise_time=lowercase )
return logits
@torch.no_grad()
def __call__( self , lowercase , lowercase = None , lowercase = 100 , lowercase = True , lowercase = "numpy" , lowercase = None , lowercase = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase , lowercase ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(lowercase )}.' )
A__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
A__ = np.zeros([1, 0, self.n_dims] , np.floataa )
A__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowercase , device=self.device )
for i, encoder_input_tokens in enumerate(lowercase ):
if i == 0:
A__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
A__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowercase , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
A__ = ones
A__ = self.scale_features(
lowercase , output_range=[-1.0, 1.0] , clip=lowercase )
A__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowercase , continuous_mask=lowercase , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
A__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=lowercase , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(lowercase )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
A__ = self.decode(
encodings_and_masks=lowercase , input_tokens=lowercase , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
A__ = self.scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
A__ = self.scale_to_features(lowercase , input_range=[-1.0, 1.0] )
A__ = mel[:1]
A__ = mel.cpu().float().numpy()
A__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase , lowercase )
logger.info("Generated segment" , lowercase )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
if output_type == "numpy":
A__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
A__ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=lowercase )
| 626 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class a__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=0.9 , lowercase=None , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , ) -> str:
'''simple docstring'''
A__ = size if size is not None else {"shortest_edge": 30}
A__ = crop_size if crop_size is not None else {"height": 30, "width": 30}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize_and_center_crop
A__ = size
A__ = crop_pct
A__ = crop_size
A__ = do_normalize
A__ = image_mean
A__ = image_std
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class a__ ( snake_case , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = PoolFormerImageProcessor if is_vision_available() else None
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
A__ = PoolFormerImageProcessingTester(self )
@property
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , "do_resize_and_center_crop" ) )
self.assertTrue(hasattr(lowercase , "size" ) )
self.assertTrue(hasattr(lowercase , "crop_pct" ) )
self.assertTrue(hasattr(lowercase , "do_normalize" ) )
self.assertTrue(hasattr(lowercase , "image_mean" ) )
self.assertTrue(hasattr(lowercase , "image_std" ) )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 30} )
self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30} )
A__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 626 | 1 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def lowerCAmelCase__ ( ) -> Tuple:
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request("GET" , "https://huggingface.co" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("GET" , "https://huggingface.co" , timeout=1.0 )
@pytest.mark.integration
def lowerCAmelCase__ ( ) -> int:
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("GET" , "https://huggingface.co" )
def lowerCAmelCase__ ( ) -> Dict:
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(ConnectionError ):
http_head("https://huggingface.co" )
| 626 |
import datasets
from .evaluate import evaluate
lowerCAmelCase__ = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
lowerCAmelCase__ = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
lowerCAmelCase__ = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
"""simple docstring"""
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
def UpperCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
A__ = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
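# Rebuild the nested SQuAD v1 structure (article -> paragraphs -> qas) that
# the official evaluate script expects.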
A__ = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
A__ = evaluate(dataset=lowercase , predictions=lowercase )
return score
| 626 | 1 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowerCAmelCase__ = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
A__ = list(s_dict.keys() )
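# 1. Rename flat T5X layer names ("layers_<n>") into the block/layer structure
# used by the HF checkpoint, then fix encoder/decoder sub-layer indices.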
for key in keys:
A__ = R".*/layers_(\d+)"
A__ = key
if re.match(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
A__ = re.sub(R"layers_(\d+)" , R"block/\1/layer" , SCREAMING_SNAKE_CASE_ )
A__ = R"(encoder|decoder)\/"
if re.match(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
A__ = re.match(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).groups()
if groups[0] == "encoder":
A__ = re.sub(R"/mlp/" , R"/1/mlp/" , SCREAMING_SNAKE_CASE_ )
A__ = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , SCREAMING_SNAKE_CASE_ )
elif groups[0] == "decoder":
A__ = re.sub(R"/mlp/" , R"/2/mlp/" , SCREAMING_SNAKE_CASE_ )
A__ = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , SCREAMING_SNAKE_CASE_ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
A__ = new_key.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(F'{key} -> {new_key}' )
A__ = s_dict.pop(SCREAMING_SNAKE_CASE_ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A__ = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A__ = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
A__ = s_dict[key].shape[0]
A__ = s_dict[key]
for idx in range(SCREAMING_SNAKE_CASE_ ):
A__ = expert_weihts[idx]
print(F'{key} -> {key.replace("expert/" , F"experts/expert_{idx}/" )}' )
s_dict.pop(SCREAMING_SNAKE_CASE_ )
return s_dict
lowerCAmelCase__ = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: Tuple ) -> List[str]:
'''simple docstring'''
import regex as re
with open(SCREAMING_SNAKE_CASE_ , "r" ) as f:
A__ = f.read()
A__ = re.findall(R"(.*) = ([0-9.]*)" , SCREAMING_SNAKE_CASE_ )
A__ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
A__ = float(SCREAMING_SNAKE_CASE_ ) if "." in value else int(SCREAMING_SNAKE_CASE_ )
A__ = re.findall(R"(.*activations) = \(\'(.*)\',\)" , SCREAMING_SNAKE_CASE_ )[0]
A__ = str(activation[1] )
A__ = num_experts
A__ = SwitchTransformersConfig(**SCREAMING_SNAKE_CASE_ )
return config
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: List[str]=None , SCREAMING_SNAKE_CASE_: Optional[Any]="./" , SCREAMING_SNAKE_CASE_: Tuple=8 ) -> int:
'''simple docstring'''
print(F'Loading flax weights from : {flax_checkpoint_path}' )
A__ = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE_ )
if gin_file is not None:
A__ = convert_gin_to_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
A__ = SwitchTransformersConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
A__ = SwitchTransformersForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
A__ = flax_params["target"]
A__ = flatten_dict(SCREAMING_SNAKE_CASE_ , sep="/" )
A__ = rename_keys(SCREAMING_SNAKE_CASE_ )
A__ = unflatten_dict(SCREAMING_SNAKE_CASE_ , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(F'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
lowerCAmelCase__ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 626 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 626 | 1 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: List[Any]=False ) -> Optional[int]:
'''simple docstring'''
try:
A__ = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
A__ = default
else:
# KEY is set, convert it to True or False.
try:
A__ = strtobool(SCREAMING_SNAKE_CASE_ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'If set, {key} must be yes or no.' )
return _value
lowerCAmelCase__ = parse_flag_from_env("""RUN_SLOW""", default=False)
lowerCAmelCase__ = parse_flag_from_env("""RUN_REMOTE""", default=False)
lowerCAmelCase__ = parse_flag_from_env("""RUN_LOCAL""", default=True)
lowerCAmelCase__ = parse_flag_from_env("""RUN_PACKAGED""", default=True)
# Compression
lowerCAmelCase__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
lowerCAmelCase__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
lowerCAmelCase__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")
# Audio
lowerCAmelCase__ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)
# Beam
lowerCAmelCase__ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
reason="""test requires apache-beam and a compatible dill version""",
)
# Dill-cloudpickle compatibility
lowerCAmelCase__ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("""0.3.2"""),
reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)
# Windows
lowerCAmelCase__ = pytest.mark.skipif(
sys.platform == """win32""",
reason="""test should not be run on Windows""",
)
def require_faiss(test_case):
    '''simple docstring'''
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case
def require_regex(test_case):
    '''simple docstring'''
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case
def require_elasticsearch(test_case):
    '''simple docstring'''
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case
def require_sqlalchemy(test_case):
    '''simple docstring'''
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case
def require_torch(test_case):
    '''simple docstring'''
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case
def require_tf(test_case):
    '''simple docstring'''
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case
def require_jax(test_case):
    '''simple docstring'''
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case
def require_pil(test_case):
    '''simple docstring'''
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case
def require_transformers(test_case):
    '''simple docstring'''
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case
def require_tiktoken(test_case):
    '''simple docstring'''
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case
def require_spacy(test_case):
    '''simple docstring'''
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case
def require_spacy_model(model):
    '''simple docstring'''
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401
            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case
    return _require_spacy_model
def require_pyspark(test_case):
    '''simple docstring'''
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case
def require_joblibspark(test_case):
    '''simple docstring'''
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def slow(test_case):
    '''simple docstring'''
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case
def local(test_case):
    '''simple docstring'''
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case
def packaged(test_case):
    '''simple docstring'''
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case
def remote(test_case):
    '''simple docstring'''
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
def for_all_test_methods(*decorators):
    '''simple docstring'''
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls
    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    """simple docstring"""
    pass
class OfflineSimulationMode(Enum):
    """simple docstring"""
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    '''simple docstring'''
    online_request = requests.Session().request
    def timeout_request(self, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout.")
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    '''simple docstring'''
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    '''simple docstring'''
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
    '''simple docstring'''
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
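# Editor's note: a hypothetical use of the memory assertion above; building a
# table from Python lists allocates from pyarrow's default memory pool.
#
#   with assert_arrow_memory_increases():
#       table = pa.table({"col": list(range(100_000))})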
def is_rng_equal(rng1, rng2):
    '''simple docstring'''
    return deepcopy(rng1).integers(0, 1_0_0, 1_0).tolist() == deepcopy(rng2).integers(0, 1_0_0, 1_0).tolist()
def xfail_if_500_502_http_error(func):
    '''simple docstring'''
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err
    return decorator.decorator(_wrapper, func)
class _RunOutput:
    """simple docstring"""
    def __init__(self, returncode, stdout, stderr) -> None:
        '''simple docstring'''
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    '''simple docstring'''
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=1_8_0, quiet=False, echo=True) -> _RunOutput:
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")
    return result
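# Editor's sketch: running a trivial command through execute_subprocess_async above;
# the command and assertion are illustrative.
#
#   import os
#   result = execute_subprocess_async([sys.executable, "-c", "print('ok')"], env=os.environ.copy())
#   assert result.returncode == 0 and result.stdout[0] == "ok"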
def pytest_xdist_worker_id() -> int:
    '''simple docstring'''
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(R"^gw", "", worker, 0, re.M)
    return int(worker)
def get_torch_dist_unique_port() -> int:
    '''simple docstring'''
    port = 2_9_5_0_0
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 626 |
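# Editor's example (separate from the rows above/below): applying the require_faiss
# skip decorator from the testing utilities above to a unittest case. The test body
# and index parameters are illustrative.
import unittest

import numpy as np


@require_faiss  # assumed importable from the utilities module above
class FaissIndexTest(unittest.TestCase):
    def test_index_roundtrip(self):
        import faiss

        index = faiss.IndexFlatL2(4)  # flat L2 index over 4-dimensional vectors
        index.add(np.zeros((2, 4), dtype="float32"))
        self.assertEqual(index.ntotal, 2)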
from math import factorial
def solution(num: int = 1_0_0) -> int:
    '''simple docstring'''
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
    print(solution(int(input("""Enter the Number: """).strip())))
| 626 | 1 |
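# Editor's note: a quick sanity check of the digit-sum-of-factorial solution above;
# 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
from math import factorial

assert sum(map(int, str(factorial(10)))) == 27  # == solution(10)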
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    """simple docstring"""
    def __init__(self, data) -> None:
        '''simple docstring'''
        self.data = data
        self.next = None
class CircularLinkedList:
    """simple docstring"""
    def __init__(self) -> None:
        '''simple docstring'''
        self.head = None
        self.tail = None
    def __iter__(self) -> Iterator[Any]:
        '''simple docstring'''
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break
    def __len__(self) -> int:
        '''simple docstring'''
        return sum(1 for _ in self)
    def __repr__(self) -> str:
        '''simple docstring'''
        return "->".join(str(item) for item in iter(self))
    def insert_tail(self, data) -> None:
        '''simple docstring'''
        self.insert_nth(len(self), data)
    def insert_head(self, data) -> None:
        '''simple docstring'''
        self.insert_nth(0, data)
    def insert_nth(self, index, data) -> None:
        '''simple docstring'''
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node
    def delete_front(self) -> Any:
        '''simple docstring'''
        return self.delete_nth(0)
    def delete_tail(self) -> Any:
        '''simple docstring'''
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index=0) -> Any:
        '''simple docstring'''
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data
    def is_empty(self) -> bool:
        '''simple docstring'''
        return len(self) == 0
def test_circular_linked_list() -> None:
    '''simple docstring'''
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3
    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 626 |
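# Editor's example: exercising the CircularLinkedList from the row above, assuming
# it is importable here.
cll = CircularLinkedList()
for value in (1, 2, 3):
    cll.insert_tail(value)
cll.insert_head(0)
assert repr(cll) == "0->1->2->3"
assert cll.delete_front() == 0
assert len(cll) == 3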
lowerCAmelCase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def base64_encode(data: bytes) -> bytes:
    '''simple docstring'''
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    '''simple docstring'''
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data)[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data)
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 626 | 1 |
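# Editor's note: the hand-rolled codec in the row above should agree with the
# standard library; a round-trip check under that assumption.
import base64

payload = b"hello base64"
assert base64_encode(payload) == base64.b64encode(payload)
assert base64_decode(base64_encode(payload)) == payload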
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCAmelCase__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = ['pixel_values']
def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ) -> None:
'''simple docstring'''
super().__init__(**lowercase )
A__ = size if size is not None else {"shortest_edge": 224}
A__ = get_size_dict(lowercase , default_to_square=lowercase )
A__ = crop_size if crop_size is not None else {"height": 224, "width": 224}
A__ = get_size_dict(lowercase , default_to_square=lowercase , param_name="crop_size" )
A__ = do_resize
A__ = size
A__ = resample
A__ = do_center_crop
A__ = crop_size
A__ = do_rescale
A__ = rescale_factor
A__ = do_normalize
A__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A__ = image_std if image_std is not None else OPENAI_CLIP_STD
A__ = do_convert_rgb
def UpperCamelCase ( self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ) -> np.ndarray:
'''simple docstring'''
A__ = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A__ = get_resize_output_image_size(lowercase , size=size["shortest_edge"] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def UpperCamelCase ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray:
'''simple docstring'''
A__ = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowercase , size=(size["height"], size["width"]) , data_format=lowercase , **lowercase )
def UpperCamelCase ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> List[str]:
'''simple docstring'''
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ) -> PIL.Image.Image:
'''simple docstring'''
A__ = do_resize if do_resize is not None else self.do_resize
A__ = size if size is not None else self.size
A__ = get_size_dict(lowercase , param_name="size" , default_to_square=lowercase )
A__ = resample if resample is not None else self.resample
A__ = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ = crop_size if crop_size is not None else self.crop_size
A__ = get_size_dict(lowercase , param_name="crop_size" , default_to_square=lowercase )
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A__ = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A__ = [convert_to_rgb(lowercase ) for image in images]
# All transformations expect numpy arrays.
A__ = [to_numpy_array(lowercase ) for image in images]
if do_resize:
A__ = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
A__ = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
A__ = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
A__ = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
A__ = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
A__ = {"pixel_values": images}
return BatchFeature(data=lowercase , tensor_type=lowercase )
| 626 |
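# Editor's sketch: typical use of a CLIP-style image processor, assuming the class
# above matches transformers' CLIPImageProcessor; the sizes and the dummy image are
# illustrative.
import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor(
    size={"shortest_edge": 224}, crop_size={"height": 224, "width": 224}
)
image = Image.fromarray(np.zeros((256, 320, 3), dtype=np.uint8))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)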
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
__lowerCamelCase = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = field(default=snake_case , metadata={'help': 'The input training data file (a text file).'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'The maximum total input sequence length after tokenization. If passed, sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'Whether to pad all samples to the maximum sentence length. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
'efficient on GPU but very bad for TPU.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if self.train_file is not None:
A__ = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
A__ = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = True
__lowerCamelCase = None
__lowerCamelCase = None
def __call__( self , lowercase ) -> Tuple:
'''simple docstring'''
A__ = "label" if "label" in features[0].keys() else "labels"
A__ = [feature.pop(lowercase ) for feature in features]
A__ = len(lowercase )
A__ = len(features[0]["input_ids"] )
A__ = [
[{k: v[i] for k, v in feature.items()} for i in range(lowercase )] for feature in features
]
A__ = list(chain(*lowercase ) )
A__ = self.tokenizer.pad(
lowercase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
A__ = {k: v.view(lowercase , lowercase , -1 ) for k, v in batch.items()}
# Add back labels
A__ = torch.tensor(lowercase , dtype=torch.intaa )
return batch
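# Editor's note: the collator above flattens each example's num_choices candidate
# sequences so tokenizer.pad sees one flat batch, then views the padded tensors
# back to (batch_size, num_choices, seq_len); e.g. 8 examples x 4 endings are
# padded as 32 sequences and reshaped to (8, 4, -1) before labels are re-attached.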
def lowerCAmelCase__ ( ) -> List[Any]:
'''simple docstring'''
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__ , A__ , A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__ , A__ , A__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A__ = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE_ )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
A__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
A__ = {}
if data_args.train_file is not None:
A__ = data_args.train_file
if data_args.validation_file is not None:
A__ = data_args.validation_file
A__ = data_args.train_file.split("." )[-1]
A__ = load_dataset(
SCREAMING_SNAKE_CASE_ , data_files=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
A__ = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
A__ = [F'ending{i}' for i in range(4 )]
A__ = "sent1"
A__ = "sent2"
if data_args.max_seq_length is None:
A__ = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
A__ = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
A__ = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(SCREAMING_SNAKE_CASE_: Optional[Any] ):
A__ = [[context] * 4 for context in examples[context_name]]
A__ = examples[question_header_name]
A__ = [
[F'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(SCREAMING_SNAKE_CASE_ )
]
# Flatten out
A__ = list(chain(*SCREAMING_SNAKE_CASE_ ) )
A__ = list(chain(*SCREAMING_SNAKE_CASE_ ) )
# Tokenize
A__ = tokenizer(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
A__ = raw_datasets["train"]
if data_args.max_train_samples is not None:
A__ = min(len(SCREAMING_SNAKE_CASE_ ) , data_args.max_train_samples )
A__ = train_dataset.select(range(SCREAMING_SNAKE_CASE_ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
A__ = train_dataset.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
A__ = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
A__ = min(len(SCREAMING_SNAKE_CASE_ ) , data_args.max_eval_samples )
A__ = eval_dataset.select(range(SCREAMING_SNAKE_CASE_ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
A__ = eval_dataset.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
A__ = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(SCREAMING_SNAKE_CASE_: str ):
A__ , A__ = eval_predictions
A__ = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
A__ = Trainer(
model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , compute_metrics=SCREAMING_SNAKE_CASE_ , )
# Training
if training_args.do_train:
A__ = None
if training_args.resume_from_checkpoint is not None:
A__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A__ = last_checkpoint
A__ = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ )
trainer.save_model() # Saves the tokenizer too for easy upload
A__ = train_result.metrics
A__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE_ )
)
A__ = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
trainer.log_metrics("train" , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics("train" , SCREAMING_SNAKE_CASE_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
A__ = trainer.evaluate()
A__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE_ )
A__ = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
trainer.log_metrics("eval" , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics("eval" , SCREAMING_SNAKE_CASE_ )
A__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE_ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] ) -> Dict:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 626 | 1 |
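# Editor's note: the SWAG fine-tuning script in the row above is launched from the
# command line; an illustrative invocation (all argument values are examples):
#
#   python run_swag.py \
#       --model_name_or_path bert-base-uncased \
#       --do_train --do_eval \
#       --per_device_train_batch_size 16 \
#       --learning_rate 5e-5 \
#       --num_train_epochs 3 \
#       --output_dir /tmp/swag_out \
#       --overwrite_output_dir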
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class a__ :
"""simple docstring"""
def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> int:
'''simple docstring'''
return None
class a__ :
"""simple docstring"""
def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
return None
class a__ ( unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowercase , "tf" , 12 , **lowercase )
@require_torch
@slow
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowercase , "pt" , 12 , **lowercase )
@require_torch
@slow
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
from transformers import BertModel
A__ = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(lowercase ) )
vocab_file.flush()
A__ = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
A__ = BertModel(BertConfig(vocab_size=len(lowercase ) ) )
model.save_pretrained(lowercase )
self._test_export(lowercase , "pt" , 12 , lowercase )
@require_tf
@slow
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
A__ = self._test_export(lowercase , "tf" , 12 , **lowercase )
A__ = quantize(Path(lowercase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowercase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
A__ = self._test_export(lowercase , "pt" , 12 , **lowercase )
A__ = quantize(lowercase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowercase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase=None , **lowercase ) -> str:
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
A__ = Path(lowercase ).joinpath("model.onnx" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase )
return path
except Exception as e:
self.fail(lowercase )
@require_torch
@require_tokenizers
@slow
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
from transformers import BertModel
A__ = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
A__ = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(lowercase , lowercase , "pt" )
@require_tf
@require_tokenizers
@slow
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
from transformers import TFBertModel
A__ = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
A__ = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(lowercase , lowercase , "tf" )
def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
A__ = FeatureExtractionPipeline(lowercase , lowercase )
A__ = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
A__ , A__ , A__ , A__ = infer_shapes(lowercase , lowercase )
# Assert all variables are present
self.assertEqual(len(lowercase ) , len(lowercase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , lowercase )
self.assertSequenceEqual(variable_names[3:] , lowercase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = ["input_ids", "attention_mask", "token_type_ids"]
A__ = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
A__ , A__ = ensure_valid_input(FuncContiguousArgs() , lowercase , lowercase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowercase ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(lowercase ) , set(lowercase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowercase , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
A__ , A__ = ensure_valid_input(FuncNonContiguousArgs() , lowercase , lowercase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(lowercase ) , 1 )
self.assertEqual(len(lowercase ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["input_ids"] )
self.assertEqual(ordered_input_names[0] , "input_ids" )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
A__ = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
| 626 |
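# Editor's sketch: calling the convert() helper exercised by the tests above
# directly; the checkpoint, opset and output path are illustrative, and the output
# folder must not already contain files.
from pathlib import Path

from transformers.convert_graph_to_onnx import convert

convert(
    framework="pt",  # "pt" for PyTorch, "tf" for TensorFlow
    model="lysandre/tiny-bert-random",  # tiny checkpoint also used in the tests
    output=Path("/tmp/onnx/model.onnx"),
    opset=12,
)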
def least_divisible_repunit(divisor: int) -> int:
    '''simple docstring'''
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (1_0 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution(limit: int = 1_0_0_0_0_0_0) -> int:
    '''simple docstring'''
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f"""{solution() = }""")
| 626 | 1 |
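# Editor's note: a quick check of the repunit routine above, restated so the
# snippet is self-contained. R(6) = 111111 = 3 * 7 * 11 * 13 * 37 is the shortest
# repunit divisible by 7, so A(7) = 6.
def _least_divisible_repunit(divisor: int) -> int:
    repunit, repunit_index = 1, 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index

assert _least_divisible_repunit(7) == 6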