code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = '''longformer'''
def __init__( self , a_ = 512 , a_ = 2 , a_ = 1 , a_ = 0 , a_ = 2 , a_ = 3_0522 , a_ = 768 , a_ = 12 , a_ = 12 , a_ = 3072 , a_ = "gelu" , a_ = 0.1 , a_ = 0.1 , a_ = 512 , a_ = 2 , a_ = 0.02 , a_ = 1E-12 , a_ = False , **a_ , ):
super().__init__(pad_token_id=a_ , **a_ )
lowerCamelCase_ : str = attention_window
lowerCamelCase_ : Union[str, Any] = sep_token_id
lowerCamelCase_ : Dict = bos_token_id
lowerCamelCase_ : List[Any] = eos_token_id
lowerCamelCase_ : str = vocab_size
lowerCamelCase_ : Union[str, Any] = hidden_size
lowerCamelCase_ : Optional[Any] = num_hidden_layers
lowerCamelCase_ : List[Any] = num_attention_heads
lowerCamelCase_ : str = hidden_act
lowerCamelCase_ : List[str] = intermediate_size
lowerCamelCase_ : Optional[int] = hidden_dropout_prob
lowerCamelCase_ : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase_ : Optional[int] = max_position_embeddings
lowerCamelCase_ : Any = type_vocab_size
lowerCamelCase_ : Union[str, Any] = initializer_range
lowerCamelCase_ : Any = layer_norm_eps
lowerCamelCase_ : Optional[int] = onnx_export
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , a_ , a_ = "default" , a_ = None ):
super().__init__(a_ , a_ , a_ )
lowerCamelCase_ : Union[str, Any] = True
@property
def _UpperCamelCase ( self ):
if self.task == "multiple-choice":
lowerCamelCase_ : str = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase_ : Dict = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = super().outputs
if self.task == "default":
lowerCamelCase_ : Optional[int] = {0: "batch"}
return outputs
@property
def _UpperCamelCase ( self ):
return 1E-4
@property
def _UpperCamelCase ( self ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def _UpperCamelCase ( self , a_ , a_ = -1 , a_ = -1 , a_ = False , a_ = None , ):
lowerCamelCase_ : int = super().generate_dummy_inputs(
preprocessor=a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
lowerCamelCase_ : int = torch.zeros_like(inputs["input_ids"] )
# make every second token global
lowerCamelCase_ : List[str] = 1
return inputs
| 73 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False):
'''simple docstring'''
if radian_mode:
return [magnitude * cos(lowerCAmelCase_), magnitude * sin(lowerCAmelCase_)]
return [magnitude * cos(radians(lowerCAmelCase_)), magnitude * sin(radians(lowerCAmelCase_))]
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10**-1):
'''simple docstring'''
lowerCamelCase_ : NDArray[floataa] = cross(lowerCAmelCase_ , lowerCAmelCase_)
lowerCamelCase_ : float = sum(lowerCAmelCase_)
return abs(lowerCAmelCase_) < eps
if __name__ == "__main__":
# Test to check if it works
__magic_name__ = array(
[
polar_force(7_18.4, 1_8_0 - 3_0),
polar_force(8_79.54, 4_5),
polar_force(1_0_0, -9_0),
]
)
__magic_name__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
__magic_name__ = array(
[
polar_force(3_0 * 9.81, 1_5),
polar_force(2_1_5, 1_8_0 - 4_5),
polar_force(2_6_4, 9_0 - 3_0),
]
)
__magic_name__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
__magic_name__ = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
__magic_name__ = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 73 | 1 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = IFPipeline
__UpperCAmelCase : List[str] = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
__UpperCAmelCase : List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCAmelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _UpperCamelCase ( self ):
return self._get_dummy_components()
def _UpperCamelCase ( self , a_ , a_=0 ):
if str(a_ ).startswith("mps" ):
lowerCamelCase_ : int = torch.manual_seed(a_ )
else:
lowerCamelCase_ : Any = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase_ : List[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def _UpperCamelCase ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _UpperCamelCase ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _UpperCamelCase ( self ):
self._test_save_load_local()
def _UpperCamelCase ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _UpperCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self ):
# if
lowerCamelCase_ : Optional[Any] = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
lowerCamelCase_ : Optional[Any] = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=a_ , tokenizer=a_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
lowerCamelCase_ ,lowerCamelCase_ : List[Any] = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
lowerCamelCase_ : Optional[Any] = None
lowerCamelCase_ : Tuple = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(a_ , a_ , a_ , a_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
lowerCamelCase_ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
lowerCamelCase_ : str = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(a_ , a_ , a_ , a_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
lowerCamelCase_ : int = IFInpaintingPipeline(**pipe_a.components )
lowerCamelCase_ : List[Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(a_ , a_ , a_ , a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ):
# pipeline 1
_start_torch_memory_measurement()
lowerCamelCase_ : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , num_inference_steps=2 , generator=a_ , output_type="np" , )
lowerCamelCase_ : List[str] = output.images[0]
assert image.shape == (64, 64, 3)
lowerCamelCase_ : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
lowerCamelCase_ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(a_ , a_ )
# pipeline 2
_start_torch_memory_measurement()
lowerCamelCase_ : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase_ : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
lowerCamelCase_ : List[Any] = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , generator=a_ , num_inference_steps=2 , output_type="np" , )
lowerCamelCase_ : Dict = output.images[0]
assert image.shape == (256, 256, 3)
lowerCamelCase_ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCamelCase_ : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(a_ , a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ):
# pipeline 1
_start_torch_memory_measurement()
lowerCamelCase_ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
lowerCamelCase_ : str = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase_ : int = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , num_inference_steps=2 , generator=a_ , output_type="np" , )
lowerCamelCase_ : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
lowerCamelCase_ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowerCamelCase_ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(a_ , a_ )
# pipeline 2
_start_torch_memory_measurement()
lowerCamelCase_ : Optional[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase_ : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(a_ )
lowerCamelCase_ : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
lowerCamelCase_ : List[Any] = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , original_image=a_ , generator=a_ , num_inference_steps=2 , output_type="np" , )
lowerCamelCase_ : Any = output.images[0]
assert image.shape == (256, 256, 3)
lowerCamelCase_ : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCamelCase_ : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(a_ , a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ):
# pipeline 1
_start_torch_memory_measurement()
lowerCamelCase_ : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
lowerCamelCase_ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(a_ )
lowerCamelCase_ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase_ : Any = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , mask_image=a_ , num_inference_steps=2 , generator=a_ , output_type="np" , )
lowerCamelCase_ : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
lowerCamelCase_ : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowerCamelCase_ : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(a_ , a_ )
# pipeline 2
_start_torch_memory_measurement()
lowerCamelCase_ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase_ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a_ )
lowerCamelCase_ : Any = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(a_ )
lowerCamelCase_ : List[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(a_ )
lowerCamelCase_ : Optional[int] = pipe_a(
prompt_embeds=a_ , negative_prompt_embeds=a_ , image=a_ , mask_image=a_ , original_image=a_ , generator=a_ , num_inference_steps=2 , output_type="np" , )
lowerCamelCase_ : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
lowerCamelCase_ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCamelCase_ : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(a_ , a_ )
def __magic_name__ ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 73 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = '''ClapFeatureExtractor'''
__UpperCAmelCase : List[str] = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self , a_ , a_ ):
super().__init__(a_ , a_ )
def __call__( self , a_=None , a_=None , a_=None , **a_ ):
lowerCamelCase_ : Any = kwargs.pop("sampling_rate" , a_ )
if text is None and audios is None:
raise ValueError("You have to specify either text or audios. Both cannot be none." )
if text is not None:
lowerCamelCase_ : Any = self.tokenizer(a_ , return_tensors=a_ , **a_ )
if audios is not None:
lowerCamelCase_ : List[str] = self.feature_extractor(
a_ , sampling_rate=a_ , return_tensors=a_ , **a_ )
if text is not None and audios is not None:
lowerCamelCase_ : List[str] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def _UpperCamelCase ( self , *a_ , **a_ ):
return self.tokenizer.batch_decode(*a_ , **a_ )
def _UpperCamelCase ( self , *a_ , **a_ ):
return self.tokenizer.decode(*a_ , **a_ )
@property
def _UpperCamelCase ( self ):
lowerCamelCase_ : int = self.tokenizer.model_input_names
lowerCamelCase_ : Dict = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 73 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = '''▁'''
__magic_name__ = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
__magic_name__ = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
__magic_name__ = {
'''facebook/s2t-small-librispeech-asr''': 1_0_2_4,
}
__magic_name__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
__magic_name__ = {'''mustc''': MUSTC_LANGS}
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = VOCAB_FILES_NAMES
__UpperCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : List[str] = MAX_MODEL_INPUT_SIZES
__UpperCAmelCase : Optional[int] = ['''input_ids''', '''attention_mask''']
__UpperCAmelCase : List[int] = []
def __init__( self , a_ , a_ , a_="<s>" , a_="</s>" , a_="<pad>" , a_="<unk>" , a_=False , a_=False , a_=None , a_=None , a_ = None , **a_ , ):
lowerCamelCase_ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a_ , eos_token=a_ , unk_token=a_ , pad_token=a_ , do_upper_case=a_ , do_lower_case=a_ , tgt_lang=a_ , lang_codes=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
lowerCamelCase_ : Tuple = do_upper_case
lowerCamelCase_ : Tuple = do_lower_case
lowerCamelCase_ : Optional[int] = load_json(a_ )
lowerCamelCase_ : Tuple = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ : str = spm_file
lowerCamelCase_ : int = load_spm(a_ , self.sp_model_kwargs )
if lang_codes is not None:
lowerCamelCase_ : Tuple = lang_codes
lowerCamelCase_ : Union[str, Any] = LANGUAGES[lang_codes]
lowerCamelCase_ : Any = [F"""<lang:{lang}>""" for lang in self.langs]
lowerCamelCase_ : Optional[Any] = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
lowerCamelCase_ : int = self.lang_tokens
lowerCamelCase_ : Union[str, Any] = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
lowerCamelCase_ : List[Any] = {}
@property
def _UpperCamelCase ( self ):
return len(self.encoder )
@property
def _UpperCamelCase ( self ):
return self._tgt_lang
@tgt_lang.setter
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Tuple = new_tgt_lang
self.set_tgt_lang_special_tokens(a_ )
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Union[str, Any] = self.lang_code_to_id[tgt_lang]
lowerCamelCase_ : Optional[Any] = [lang_code_id]
def _UpperCamelCase ( self , a_ ):
return self.sp_model.encode(a_ , out_type=a_ )
def _UpperCamelCase ( self , a_ ):
return self.encoder.get(a_ , self.encoder[self.unk_token] )
def _UpperCamelCase ( self , a_ ):
return self.decoder.get(a_ , self.unk_token )
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Dict = []
lowerCamelCase_ : Any = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
lowerCamelCase_ : Dict = self.sp_model.decode(a_ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
lowerCamelCase_ : List[str] = []
else:
current_sub_tokens.append(a_ )
lowerCamelCase_ : Union[str, Any] = self.sp_model.decode(a_ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def _UpperCamelCase ( self , a_ , a_=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def _UpperCamelCase ( self , a_ , a_ = None , a_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
lowerCamelCase_ : List[str] = [1] * len(self.prefix_tokens )
lowerCamelCase_ : str = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(a_ )) + suffix_ones
return prefix_ones + ([0] * len(a_ )) + ([0] * len(a_ )) + suffix_ones
def _UpperCamelCase ( self ):
lowerCamelCase_ : str = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
lowerCamelCase_ : Optional[int] = self.__dict__.copy()
lowerCamelCase_ : List[str] = None
return state
def __setstate__( self , a_ ):
lowerCamelCase_ : str = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase_ : Any = {}
lowerCamelCase_ : List[str] = load_spm(self.spm_file , self.sp_model_kwargs )
def _UpperCamelCase ( self , a_ , a_ = None ):
lowerCamelCase_ : str = Path(a_ )
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
lowerCamelCase_ : Optional[Any] = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
lowerCamelCase_ : Union[str, Any] = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , a_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(a_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , a_ )
elif not os.path.isfile(self.spm_file ):
with open(a_ , "wb" ) as fi:
lowerCamelCase_ : Dict = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (str(a_ ), str(a_ ))
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = sentencepiece.SentencePieceProcessor(**lowerCAmelCase_)
spm.Load(str(lowerCAmelCase_))
return spm
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
with open(lowerCAmelCase_ , "r") as f:
return json.load(lowerCAmelCase_)
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
with open(lowerCAmelCase_ , "w") as f:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ , indent=2)
| 73 |
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowerCamelCase_ : Any = set()
# Replace all the whitespace in our sentence
lowerCamelCase_ : str = input_str.replace(" " , "")
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower())
return len(lowerCAmelCase_) == 26
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowerCamelCase_ : List[Any] = [False] * 26
for char in input_str:
if char.islower():
lowerCamelCase_ : List[Any] = True
elif char.isupper():
lowerCamelCase_ : Optional[int] = True
return all(lowerCAmelCase_)
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()}) == 26
def __magic_name__ ( ):
'''simple docstring'''
from timeit import timeit
lowerCamelCase_ : Optional[int] = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=lowerCAmelCase_))
print(timeit("is_pangram_faster()" , setup=lowerCAmelCase_))
print(timeit("is_pangram_fastest()" , setup=lowerCAmelCase_))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 73 | 1 |
import re
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
if len(re.findall("[ATCG]" , lowerCAmelCase_)) != len(lowerCAmelCase_):
raise ValueError("Invalid Strand")
return dna.translate(dna.maketrans("ATCG" , "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 |
__magic_name__ = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.602_176_634E-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.35_58_18,
}
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
lowerCamelCase_ : List[Any] = (
F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
F"""Valid values are: {', '.join(lowerCAmelCase_)}"""
)
raise ValueError(lowerCAmelCase_)
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 1 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class lowerCAmelCase__ ( yaml.SafeLoader ):
"""simple docstring"""
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : List[Any] = [self.constructed_objects[key_node] for key_node, _ in node.value]
lowerCamelCase_ : str = [tuple(a_ ) if isinstance(a_ , a_ ) else key for key in keys]
lowerCamelCase_ : List[Any] = Counter(a_ )
lowerCamelCase_ : Union[str, Any] = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" )
def _UpperCamelCase ( self , a_ , a_=False ):
lowerCamelCase_ : Optional[Any] = super().construct_mapping(a_ , deep=a_ )
self._check_no_duplicates_on_constructed_node(a_ )
return mapping
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : List[str] = list(readme_content.splitlines())
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
lowerCamelCase_ : str = full_content[1:].index("---") + 1
lowerCamelCase_ : Optional[int] = "\n".join(full_content[1:sep_idx])
return yamlblock, "\n".join(full_content[sep_idx + 1 :])
return None, "\n".join(lowerCAmelCase_)
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
# class attributes
__UpperCAmelCase : Any = {'''train_eval_index'''} # train-eval-index in the YAML metadata
@classmethod
def _UpperCamelCase ( cls , a_ ):
with open(a_ , encoding="utf-8" ) as readme_file:
lowerCamelCase_ ,lowerCamelCase_ : Any = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(a_ )
else:
return cls()
def _UpperCamelCase ( self , a_ ):
if path.exists():
with open(a_ , encoding="utf-8" ) as readme_file:
lowerCamelCase_ : Optional[Any] = readme_file.read()
else:
lowerCamelCase_ : Any = None
lowerCamelCase_ : List[str] = self._to_readme(a_ )
with open(a_ , "w" , encoding="utf-8" ) as readme_file:
readme_file.write(a_ )
def _UpperCamelCase ( self , a_ = None ):
if readme_content is not None:
lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = _split_yaml_from_readme(a_ )
lowerCamelCase_ : int = "---\n" + self.to_yaml_string() + "---\n" + content
else:
lowerCamelCase_ : Any = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def _UpperCamelCase ( cls , a_ ):
lowerCamelCase_ : str = yaml.load(a_ , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
lowerCamelCase_ : List[str] = {
(key.replace("-" , "_" ) if key.replace("-" , "_" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**a_ )
def _UpperCamelCase ( self ):
return yaml.safe_dump(
{
(key.replace("_" , "-" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=a_ , allow_unicode=a_ , encoding="utf-8" , ).decode("utf-8" )
__magic_name__ = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__magic_name__ = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
__magic_name__ = ap.parse_args()
__magic_name__ = Path(args.readme_filepath)
__magic_name__ = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 73 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''vocab_file''': '''spiece.model'''}
__magic_name__ = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
__magic_name__ = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
__magic_name__ = 0
__magic_name__ = 1
__magic_name__ = 2
__magic_name__ = 3
__magic_name__ = 4
class lowerCAmelCase__ ( __lowerCamelCase ):
    """SentencePiece tokenizer in the XLNet style: pads on the left and
    appends ``<sep>``/``<cls>`` at the *end* of encoded sequences.

    NOTE(review): the obfuscation collapsed every method name to
    ``_UpperCamelCase`` and every parameter to ``a_`` (duplicate parameter
    names are not valid Python), and method bodies reference the original
    pre-obfuscation names; comments below describe the intended behaviour.
    """
    # Class-level tokenizer metadata (vocab file names, pretrained maps, padding side).
    __UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
    __UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCAmelCase : Optional[int] = '''left'''
    def __init__( self , a_ , a_=False , a_=True , a_=False , a_="<s>" , a_="</s>" , a_="<unk>" , a_="<sep>" , a_="<pad>" , a_="<cls>" , a_="<mask>" , a_=["<eop>", "<eod>"] , a_ = None , **a_ , ):
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCamelCase_ : str = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
        # Default to an empty dict so SentencePieceProcessor always gets valid kwargs.
        lowerCamelCase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
        # NOTE(review): 3 appears to be the pad token-type id used by XLNet — confirm.
        lowerCamelCase_ : str = 3
        lowerCamelCase_ : Dict = do_lower_case
        lowerCamelCase_ : str = remove_space
        lowerCamelCase_ : Tuple = keep_accents
        lowerCamelCase_ : Dict = vocab_file
        # Load the SentencePiece model from the provided vocab file.
        lowerCamelCase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(a_ )
    @property
    def _UpperCamelCase ( self ):
        # Vocabulary size equals the number of pieces in the SentencePiece model.
        return len(self.sp_model )
    def _UpperCamelCase ( self ):
        # Build the token -> id mapping, including user-added tokens.
        lowerCamelCase_ : List[str] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        # The SentencePiece processor is not picklable: drop it, reload on unpickle.
        lowerCamelCase_ : Any = self.__dict__.copy()
        lowerCamelCase_ : Optional[int] = None
        return state
    def __setstate__( self , a_ ):
        lowerCamelCase_ : Union[str, Any] = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            lowerCamelCase_ : int = {}
        # Recreate the SentencePiece processor dropped in __getstate__.
        lowerCamelCase_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _UpperCamelCase ( self , a_ ):
        # Normalise raw text before SentencePiece: collapse whitespace, unify
        # quote styles, optionally strip accents and lower-case.
        if self.remove_space:
            lowerCamelCase_ : Optional[int] = " ".join(inputs.strip().split() )
        else:
            lowerCamelCase_ : str = inputs
        lowerCamelCase_ : Any = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            # Strip combining marks after NFKD decomposition.
            lowerCamelCase_ : Dict = unicodedata.normalize("NFKD" , a_ )
            lowerCamelCase_ : int = "".join([c for c in outputs if not unicodedata.combining(a_ )] )
        if self.do_lower_case:
            lowerCamelCase_ : Any = outputs.lower()
        return outputs
    def _UpperCamelCase ( self , a_ ):
        # Tokenise with SentencePiece; re-split pieces like "9," so the trailing
        # comma becomes its own piece (special handling for digits).
        lowerCamelCase_ : List[Any] = self.preprocess_text(a_ )
        lowerCamelCase_ : Optional[int] = self.sp_model.encode(a_ , out_type=a_ )
        lowerCamelCase_ : List[str] = []
        for piece in pieces:
            if len(a_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                lowerCamelCase_ : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_ , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    # Drop the duplicated leading underline piece.
                    if len(cur_pieces[0] ) == 1:
                        lowerCamelCase_ : int = cur_pieces[1:]
                    else:
                        lowerCamelCase_ : Union[str, Any] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(a_ )
            else:
                new_pieces.append(a_ )
        return new_pieces
    def _UpperCamelCase ( self , a_ ):
        # token (str) -> id via the SentencePiece model.
        return self.sp_model.PieceToId(a_ )
    def _UpperCamelCase ( self , a_ ):
        # id -> token (str) via the SentencePiece model.
        return self.sp_model.IdToPiece(a_ )
    def _UpperCamelCase ( self , a_ ):
        # Join pieces and turn the SentencePiece underline marker back into spaces.
        lowerCamelCase_ : Dict = "".join(a_ ).replace(a_ , " " ).strip()
        return out_string
    def _UpperCamelCase ( self , a_ , a_ = False , a_ = None , a_ = True , **a_ , ):
        # Decode ids to a string, handling added tokens separately from
        # SentencePiece pieces so both decode correctly.
        lowerCamelCase_ : int = kwargs.pop("use_source_tokenizer" , a_ )
        lowerCamelCase_ : List[str] = self.convert_ids_to_tokens(a_ , skip_special_tokens=a_ )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        lowerCamelCase_ : Optional[int] = []
        lowerCamelCase_ : List[str] = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(a_ ) )
                lowerCamelCase_ : Union[str, Any] = []
                sub_texts.append(a_ )
            else:
                current_sub_text.append(a_ )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(a_ ) )
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        lowerCamelCase_ : Union[str, Any] = "".join(a_ )
        lowerCamelCase_ : Optional[Any] = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            lowerCamelCase_ : List[Any] = self.clean_up_tokenization(a_ )
            return clean_text
        else:
            return text
    def _UpperCamelCase ( self , a_ , a_ = None ):
        # Build model inputs: X <sep> <cls>  or  A <sep> B <sep> <cls>.
        lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCamelCase_ : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls
    def _UpperCamelCase ( self , a_ , a_ = None , a_ = False ):
        # Special-tokens mask: 1 for the trailing <sep>/<cls>, 0 for sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
        if token_ids_a is not None:
            return ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1, 1]
        return ([0] * len(a_ )) + [1, 1]
    def _UpperCamelCase ( self , a_ , a_ = None ):
        # Token-type ids: 0 for the first sequence, 1 for the second, 2 for <cls>.
        lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCamelCase_ : Union[str, Any] = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
    def _UpperCamelCase ( self , a_ , a_ = None ):
        # Save the SentencePiece model into `save_directory`: copy the original
        # file when present, otherwise serialise the loaded model.
        if not os.path.isdir(a_ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCamelCase_ : Any = os.path.join(
            a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , a_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(a_ , "wb" ) as fi:
                lowerCamelCase_ : Dict = self.sp_model.serialized_model_proto()
                fi.write(a_ )
        return (out_vocab_file,)
| 73 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Abstract base class for CLI subcommands.

    NOTE(review): obfuscation collapsed both abstract method names to
    ``_UpperCamelCase``, so the second definition shadows the first in the
    class namespace; originally these were two distinct hooks (a static
    parser-registration method taking an ArgumentParser, and an instance
    run-style method).
    """
    @staticmethod
    @abstractmethod
    def _UpperCamelCase ( a_ ):
        # Static hook: register this command on the given ArgumentParser.
        raise NotImplementedError()
    @abstractmethod
    def _UpperCamelCase ( self ):
        # Instance hook: execute the command.
        raise NotImplementedError()
| 73 |
def __magic_name__ ( lowerCAmelCase_ = 10 , lowerCAmelCase_ = 1000 , lowerCAmelCase_ = True):
'''simple docstring'''
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_)
and isinstance(lowerCAmelCase_ , lowerCAmelCase_)
and isinstance(lowerCAmelCase_ , lowerCAmelCase_)
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
return min_val if option else max_val
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
return int((number_a + number_a) / 2)
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_) and isinstance(lowerCAmelCase_ , lowerCAmelCase_) and isinstance(lowerCAmelCase_ , lowerCAmelCase_)
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)")
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value")
def answer(lowerCAmelCase_) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started...")
lowerCamelCase_ : Optional[int] = lower
lowerCamelCase_ : Tuple = higher
lowerCamelCase_ : Union[str, Any] = []
while True:
lowerCamelCase_ : Optional[int] = get_avg(lowerCAmelCase_ , lowerCAmelCase_)
last_numbers.append(lowerCAmelCase_)
if answer(lowerCAmelCase_) == "low":
lowerCamelCase_ : Any = number
elif answer(lowerCAmelCase_) == "high":
lowerCamelCase_ : Optional[int] = number
else:
break
print(F"""guess the number : {last_numbers[-1]}""")
print(F"""details : {last_numbers!s}""")
def __magic_name__ ( ):
    """Interactive entry point: read bounds and target from stdin, then play."""
    lowerCamelCase_ : Optional[int] = int(input("Enter lower value : ").strip())
    lowerCamelCase_ : List[str] = int(input("Enter high value : ").strip())
    lowerCamelCase_ : List[str] = int(input("Enter value to guess : ").strip())
    # NOTE(review): `guess_the_number` is not bound under that name in this
    # obfuscated module; originally it was the game function defined above.
    guess_the_number(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
if __name__ == "__main__":
    # NOTE(review): `main` is likewise the lost original name of the function above.
    main()
| 73 | 1 |
class lowerCAmelCase__ :
    """Fenwick (binary indexed) tree specialised for range-maximum queries.

    ``update(index, value)`` sets a point value; ``query(left, right)``
    returns ``max(arr[left:right])`` with ``right`` exclusive.

    NOTE(review): the obfuscated original collapsed all four method names to
    ``_UpperCamelCase`` (so they shadowed each other), duplicated parameter
    names (a SyntaxError), and assigned ``__init__`` state to throwaway
    locals. Names are reconstructed from the identifiers the bodies use
    (``get_prev``, ``get_next``, ``self.tree``, ``self.arr``).
    """

    def __init__(self, size):
        # `arr` holds the point values; `tree` holds partial range maxima.
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index):
        # Next index whose tree node also covers `index` (sets lowest unset bit).
        return index | (index + 1)

    @staticmethod
    def get_prev(index):
        # Index just left of the range covered by the tree node at `index`.
        return (index & (index + 1)) - 1

    def update(self, index, value):
        """Set ``arr[index] = value`` and refresh every covering tree node."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # Node covers exactly this element: overwrite its maximum.
                self.tree[index] = value
            else:
                self.tree[index] = max(self.tree[index], value)
            index = self.get_next(index)

    def query(self, left, right):
        """Return the maximum of ``arr[left:right]`` (``right`` exclusive)."""
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                # The whole node range fits inside [left, right]: use its maximum.
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 73 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json URL for CvT models.
__magic_name__ = {
    '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration class for the CvT (Convolutional vision Transformer).

    Each list-valued argument holds one entry per encoder stage (3 stages).

    NOTE(review): the obfuscated original named every ``__init__`` parameter
    ``a_`` (duplicate names are a SyntaxError) and assigned the values to
    throwaway locals; parameter names and the ``self.*`` assignments are
    reconstructed from the right-hand sides of those assignments. The
    mutable list defaults mirror the upstream interface and are stored
    as-is, so callers must not mutate them.
    """
    __UpperCAmelCase : List[str] = '''cvt'''
    def __init__(
        self,
        num_channels=3,                       # input image channels
        patch_sizes=[7, 3, 3],                # conv-embedding kernel per stage
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],             # hidden size per stage
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],                     # transformer blocks per stage
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],       # only the last stage adds a CLS token
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 73 | 1 |
import sys
import turtle
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def __magic_name__(vertexa, vertexb, vertexc, depth):
    """Recursively draw a Sierpinski triangle with the module-level turtle pen.

    Args:
        vertexa, vertexb, vertexc: (x, y) corner points of the triangle.
        depth: remaining recursion depth; 0 draws only the outline.

    Relies on the module-level ``my_pen`` turtle created in the __main__ block.

    NOTE(review): the obfuscated signature named all four parameters
    identically (a SyntaxError) and the recursive calls referenced the lost
    names ``triangle``/``get_mid``; the midpoint helper is defined locally so
    the function is self-contained.
    """
    def _mid(pa, pb):
        # Midpoint of two (x, y) points.
        return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2

    # Outline the current triangle without drawing while moving to the start.
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0], vertexb[1])
    my_pen.goto(vertexc[0], vertexc[1])
    my_pen.goto(vertexa[0], vertexa[1])
    if depth == 0:
        return
    # Recurse into the three corner sub-triangles.
    __magic_name__(vertexa, _mid(vertexa, vertexb), _mid(vertexa, vertexc), depth - 1)
    __magic_name__(vertexb, _mid(vertexb, vertexa), _mid(vertexb, vertexc), depth - 1)
    __magic_name__(vertexc, _mid(vertexc, vertexa), _mid(vertexc, vertexb), depth - 1)
if __name__ == "__main__":
    # Require exactly one CLI argument: the recursion depth.
    if len(sys.argv) != 2:
        raise ValueError(
            '''Correct format for using this script: '''
            '''python fractals.py <int:depth_for_fractal>'''
        )
    # Shared drawing pen used by the recursive triangle routine.
    __magic_name__ = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor('''red''')
    # NOTE(review): obfuscation rebinds `__magic_name__`; `my_pen`/`vertices`
    # below are the original (now lost) names of these bindings.
    __magic_name__ = [(-1_7_5, -1_2_5), (0, 1_7_5), (1_7_5, -1_2_5)] # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 73 |
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)
# Lazy import structure: the processor is always exported.
__magic_name__ = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
# The slow (SentencePiece) tokenizer is exported only when sentencepiece is installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ = ['''LayoutXLMTokenizer''']
# The fast tokenizer is exported only when the `tokenizers` library is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
    # Static imports for type checkers; mirrors the lazy structure above.
    from .processing_layoutxlm import LayoutXLMProcessor
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
    import sys
    # Replace this module with a lazy proxy that imports on attribute access.
    # NOTE(review): `_import_structure` is the lost original name of the dict
    # assigned to `__magic_name__` above.
    __magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 73 | 1 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class lowerCAmelCase__ :
    """Test helper that builds EfficientFormer configs and dummy TF inputs.

    NOTE(review): obfuscation collapsed every ``__init__`` parameter name to
    ``a_`` (duplicate names are a SyntaxError); the intended names are
    visible on the assignment right-hand sides in the body.
    """
    def __init__( self , a_ , a_ = 13 , a_ = 64 , a_ = 2 , a_ = 3 , a_ = 3 , a_ = True , a_ = True , a_ = 128 , a_=[16, 32, 64, 128] , a_ = 7 , a_ = 4 , a_ = 37 , a_ = "gelu" , a_ = 0.1 , a_ = 0.1 , a_ = 10 , a_ = 0.02 , a_ = 2 , a_ = 1 , a_ = 128 , a_ = [2, 2, 2, 2] , a_ = 2 , a_ = 2 , ):
        # Store all model/test hyper-parameters on the tester instance.
        lowerCamelCase_ : List[str] = parent
        lowerCamelCase_ : Dict = batch_size
        lowerCamelCase_ : Optional[int] = image_size
        lowerCamelCase_ : Optional[Any] = patch_size
        lowerCamelCase_ : List[str] = num_channels
        lowerCamelCase_ : Tuple = is_training
        lowerCamelCase_ : Optional[Any] = use_labels
        lowerCamelCase_ : Optional[int] = hidden_size
        lowerCamelCase_ : List[Any] = num_hidden_layers
        lowerCamelCase_ : Tuple = num_attention_heads
        lowerCamelCase_ : List[Any] = intermediate_size
        lowerCamelCase_ : Any = hidden_act
        lowerCamelCase_ : Dict = hidden_dropout_prob
        lowerCamelCase_ : Optional[int] = attention_probs_dropout_prob
        lowerCamelCase_ : List[Any] = type_sequence_label_size
        lowerCamelCase_ : Optional[Any] = initializer_range
        lowerCamelCase_ : Any = encoder_stride
        lowerCamelCase_ : str = num_attention_outputs
        lowerCamelCase_ : str = embed_dim
        # Sequence length includes the extra token appended after embedding.
        lowerCamelCase_ : Dict = embed_dim + 1
        lowerCamelCase_ : str = resolution
        lowerCamelCase_ : int = depths
        lowerCamelCase_ : Optional[Any] = hidden_sizes
        lowerCamelCase_ : str = dim
        lowerCamelCase_ : Any = mlp_expansion_ratio
    def _UpperCamelCase ( self ):
        # Create random pixel values (and labels when enabled) plus a config.
        lowerCamelCase_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase_ : Optional[int] = None
        if self.use_labels:
            lowerCamelCase_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCamelCase_ : Tuple = self.get_config()
        return config, pixel_values, labels
    def _UpperCamelCase ( self ):
        # Build an EfficientFormerConfig from the stored hyper-parameters.
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def _UpperCamelCase ( self , a_ , a_ , a_ ):
        # Forward pass through the base model; check the hidden-state shape.
        lowerCamelCase_ : Union[str, Any] = TFEfficientFormerModel(config=a_ )
        lowerCamelCase_ : Optional[int] = model(a_ , training=a_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def _UpperCamelCase ( self , a_ , a_ , a_ ):
        # Image-classification head: check logits for RGB and greyscale inputs.
        lowerCamelCase_ : Optional[Any] = self.type_sequence_label_size
        lowerCamelCase_ : List[str] = TFEfficientFormerForImageClassification(a_ )
        lowerCamelCase_ : Union[str, Any] = model(a_ , labels=a_ , training=a_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        lowerCamelCase_ : int = 1
        lowerCamelCase_ : Union[str, Any] = TFEfficientFormerForImageClassification(a_ )
        lowerCamelCase_ : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCamelCase_ : List[Any] = model(a_ , labels=a_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def _UpperCamelCase ( self ):
        # Split prepared config/inputs into (config, inputs_dict) for common tests.
        lowerCamelCase_ : Any = self.prepare_config_and_inputs()
        lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : List[Any] = config_and_inputs
        lowerCamelCase_ : Optional[int] = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
    """Common model/pipeline test suite for the TF EfficientFormer models."""
    # Model classes under test (empty when TF is unavailable).
    __UpperCAmelCase : Any = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline task -> model class mapping for the pipeline mixin.
    __UpperCAmelCase : Tuple = (
        {
            '''feature-extraction''': TFEfficientFormerModel,
            '''image-classification''': (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    # Feature flags consumed by the common test mixin (all disabled here).
    __UpperCAmelCase : str = False
    __UpperCAmelCase : Union[str, Any] = False
    __UpperCAmelCase : List[Any] = False
    __UpperCAmelCase : Union[str, Any] = False
    __UpperCAmelCase : Dict = False
    def _UpperCamelCase ( self ):
        # Set up the model tester and the config tester.
        lowerCamelCase_ : int = TFEfficientFormerModelTester(self )
        lowerCamelCase_ : str = ConfigTester(
            self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
    def _UpperCamelCase ( self ):
        # Run the shared configuration sanity checks.
        self.config_tester.run_common_tests()
    @unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
    def _UpperCamelCase ( self ):
        pass
    @unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
    def _UpperCamelCase ( self ):
        pass
    def _UpperCamelCase ( self ):
        # The first positional argument of every model's call() must be pixel_values.
        lowerCamelCase_ ,lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ : str = model_class(a_ )
            lowerCamelCase_ : str = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase_ : Optional[int] = [*signature.parameters.keys()]
            lowerCamelCase_ : Optional[Any] = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , a_ )
    def _UpperCamelCase ( self ):
        # Verify hidden-state outputs (count and final shape) for each model class.
        def check_hidden_states_output(a_ , a_ , a_ ):
            lowerCamelCase_ : int = model_class(a_ )
            lowerCamelCase_ : int = model(**self._prepare_for_class(a_ , a_ ) , training=a_ )
            lowerCamelCase_ : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCamelCase_ : List[Any] = getattr(
                self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(a_ ) , a_ )
            if hasattr(self.model_tester , "encoder_seq_length" ):
                lowerCamelCase_ : List[Any] = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
                    lowerCamelCase_ : int = seq_length * self.model_tester.chunk_length
            else:
                lowerCamelCase_ : Union[str, Any] = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
            if config.is_encoder_decoder:
                lowerCamelCase_ : str = outputs.decoder_hidden_states
                # NOTE(review): typo in the original — should be assertIsInstance;
                # this line would raise AttributeError if ever reached.
                self.asseretIsInstance(a_ , (list, tuple) )
                self.assertEqual(len(a_ ) , a_ )
                lowerCamelCase_ : Any = getattr(self.model_tester , "seq_length" , a_ )
                lowerCamelCase_ : Union[str, Any] = getattr(self.model_tester , "decoder_seq_length" , a_ )
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
        lowerCamelCase_ ,lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ : Optional[int] = True
            check_hidden_states_output(a_ , a_ , a_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase_ : Tuple = True
            check_hidden_states_output(a_ , a_ , a_ )
    def _UpperCamelCase ( self , a_ , a_ , a_=False ):
        # The teacher-distillation head takes no labels; drop them when present.
        lowerCamelCase_ : List[str] = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def _UpperCamelCase ( self ):
        # Basic forward-pass test of the base model.
        lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a_ )
    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*a_ )
    def _UpperCamelCase ( self ):
        # Forward-pass test of the image-classification head.
        lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*a_ )
    @slow
    def _UpperCamelCase ( self ):
        # Smoke-test loading the first published checkpoint.
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ : Tuple = TFEfficientFormerModel.from_pretrained(a_ )
            self.assertIsNotNone(a_ )
    def _UpperCamelCase ( self ):
        # Verify attention outputs (count and shape), via kwargs and via config.
        lowerCamelCase_ ,lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase_ : Tuple = True
        lowerCamelCase_ : Tuple = getattr(self.model_tester , "seq_length" , a_ )
        lowerCamelCase_ : List[Any] = getattr(self.model_tester , "encoder_seq_length" , a_ )
        lowerCamelCase_ : Dict = getattr(self.model_tester , "key_length" , a_ )
        lowerCamelCase_ : Optional[Any] = getattr(self.model_tester , "chunk_length" , a_ )
        if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
            lowerCamelCase_ : Optional[int] = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            lowerCamelCase_ : Union[str, Any] = True
            lowerCamelCase_ : Optional[int] = False
            lowerCamelCase_ : Dict = True
            lowerCamelCase_ : str = model_class(a_ )
            lowerCamelCase_ : Dict = model(**self._prepare_for_class(a_ , a_ ) , training=a_ )
            lowerCamelCase_ : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(a_ ) , self.model_tester.num_attention_outputs )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            lowerCamelCase_ : Optional[int] = True
            lowerCamelCase_ : Any = model_class(a_ )
            lowerCamelCase_ : str = model(**self._prepare_for_class(a_ , a_ ) , training=a_ )
            lowerCamelCase_ : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(a_ ) , self.model_tester.num_attention_outputs )
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
    def _UpperCamelCase ( self ):
        # We use a simplified version of this test for EfficientFormer because it requires training=False
        # and Keras refuses to let us force that during functional construction
        lowerCamelCase_ ,lowerCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            lowerCamelCase_ : Optional[int] = model_class(a_ )
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            lowerCamelCase_ : Optional[Any] = {
                key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=a_ )
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            lowerCamelCase_ : List[str] = model(a_ )
            self.assertTrue(outputs_dict is not None )
def __magic_name__():
    """Load the COCO sample image used by the slow integration tests.

    NOTE(review): the obfuscated original assigned the opened image to a
    throwaway local and then returned the undefined name ``image`` — a
    guaranteed NameError; the binding is restored here.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow end-to-end integration tests against published EfficientFormer
    checkpoints, comparing the first logits to hard-coded reference values."""
    @cached_property
    def _UpperCamelCase ( self ):
        # Image processor for the reference checkpoint (None without vision deps).
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
            if is_vision_available()
            else None
        )
    @slow
    def _UpperCamelCase ( self ):
        # End-to-end check of the plain classification head.
        lowerCamelCase_ : Union[str, Any] = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
        lowerCamelCase_ : List[str] = self.default_image_processor
        lowerCamelCase_ : Tuple = prepare_img()
        lowerCamelCase_ : str = image_processor(images=a_ , return_tensors="tf" )
        # forward pass
        lowerCamelCase_ : Any = model(**a_ , training=a_ )
        # verify the logits
        lowerCamelCase_ : Optional[Any] = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , a_ )
        lowerCamelCase_ : Any = tf.constant([-0.05_55, 0.48_25, -0.08_52] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , a_ , atol=1E-4 ) )
    @slow
    def _UpperCamelCase ( self ):
        # Same end-to-end check for the teacher-distillation classification head.
        lowerCamelCase_ : Optional[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300" )
        lowerCamelCase_ : List[str] = self.default_image_processor
        lowerCamelCase_ : int = prepare_img()
        lowerCamelCase_ : Optional[int] = image_processor(images=a_ , return_tensors="tf" )
        # forward pass
        lowerCamelCase_ : int = model(**a_ , training=a_ )
        # verify the logits
        lowerCamelCase_ : Dict = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , a_ )
        lowerCamelCase_ : List[str] = tf.constant([-0.13_12, 0.43_53, -1.04_99] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , a_ , atol=1E-4 ) )
| 73 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Processor pairing an Encodec feature extractor with a T5 tokenizer:
    encodes text and/or audio for the model and decodes generated audio,
    trimming padding via the feature extractor's padding mask."""
    __UpperCAmelCase : Dict = '''EncodecFeatureExtractor'''
    __UpperCAmelCase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
    def __init__( self , a_ , a_ ):
        super().__init__(a_ , a_ )
        # Default the "current" processor to the feature extractor.
        lowerCamelCase_ : Optional[Any] = self.feature_extractor
        lowerCamelCase_ : Optional[int] = False
    def _UpperCamelCase ( self , a_=None , a_=None , a_=True ):
        # Delegate decoder prompt-id construction to the tokenizer.
        # NOTE(review): `get_decoder_prompt_ids` is not a standard T5 tokenizer
        # method — verify against the tokenizer actually used.
        return self.tokenizer.get_decoder_prompt_ids(task=a_ , language=a_ , no_timestamps=a_ )
    def __call__( self , *a_ , **a_ ):
        # Encode `text` and/or `audio` inputs; merges audio features into the
        # tokenizer output when both are given.
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*a_ , **a_ )
        lowerCamelCase_ : str = kwargs.pop("audio" , a_ )
        lowerCamelCase_ : List[str] = kwargs.pop("sampling_rate" , a_ )
        lowerCamelCase_ : Optional[Any] = kwargs.pop("text" , a_ )
        if len(a_ ) > 0:
            # First positional argument is treated as the audio input.
            lowerCamelCase_ : int = args[0]
            lowerCamelCase_ : str = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if text is not None:
            lowerCamelCase_ : Dict = self.tokenizer(a_ , **a_ )
        if audio is not None:
            lowerCamelCase_ : Optional[Any] = self.feature_extractor(a_ , *a_ , sampling_rate=a_ , **a_ )
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            # Merge the audio features (and padding mask) into the text encoding.
            lowerCamelCase_ : Dict = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                lowerCamelCase_ : int = audio_inputs["padding_mask"]
            return inputs
    def _UpperCamelCase ( self , *a_ , **a_ ):
        # Batch decode: audio values -> trimmed waveforms, otherwise token ids -> text.
        lowerCamelCase_ : Dict = kwargs.pop("audio" , a_ )
        lowerCamelCase_ : Optional[Any] = kwargs.pop("padding_mask" , a_ )
        if len(a_ ) > 0:
            lowerCamelCase_ : Optional[int] = args[0]
            lowerCamelCase_ : Optional[Any] = args[1:]
        if audio_values is not None:
            return self._decode_audio(a_ , padding_mask=a_ )
        else:
            return self.tokenizer.batch_decode(*a_ , **a_ )
    def _UpperCamelCase ( self , *a_ , **a_ ):
        # Plain text decode, delegated to the tokenizer.
        return self.tokenizer.decode(*a_ , **a_ )
    def _UpperCamelCase ( self , a_ , a_ = None ):
        # Strip padded samples from each generated audio array using the padding mask.
        lowerCamelCase_ : Any = to_numpy(a_ )
        lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : List[str] = audio_values.shape
        if padding_mask is None:
            return list(a_ )
        lowerCamelCase_ : Tuple = to_numpy(a_ )
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        lowerCamelCase_ : List[str] = seq_len - padding_mask.shape[-1]
        lowerCamelCase_ : int = 1 - self.feature_extractor.padding_value
        lowerCamelCase_ : List[Any] = np.pad(a_ , ((0, 0), (0, difference)) , "constant" , constant_values=a_ )
        lowerCamelCase_ : str = audio_values.tolist()
        for i in range(a_ ):
            # Keep only the samples where the mask marks real (non-padding) audio.
            lowerCamelCase_ : Dict = np.asarray(audio_values[i] )[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            lowerCamelCase_ : Dict = sliced_audio.reshape(a_ , -1 )
        return audio_values
| 73 | 1 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
return 1.0 / (1.0 + np.exp(-_outputs))
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : List[Any] = np.max(_outputs , axis=-1 , keepdims=lowerCAmelCase_)
lowerCamelCase_ : int = np.exp(_outputs - maxes)
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowerCAmelCase_)
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Enumeration of score post-processing functions for the
    text-classification pipeline (sigmoid / softmax / none).

    NOTE(review): obfuscation collapsed the member names; originally these
    were distinct enum members (SIGMOID, SOFTMAX, NONE) of an ExplicitEnum,
    so as written the later assignments shadow the earlier ones.
    """
    __UpperCAmelCase : Tuple = '''sigmoid'''
    __UpperCAmelCase : List[str] = '''softmax'''
    __UpperCAmelCase : Tuple = '''none'''
@add_end_docstrings(
__lowerCamelCase, r'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''', )
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Text/sequence-classification pipeline.

    Appears to mirror `transformers.TextClassificationPipeline`: tokenize the
    input, run a sequence-classification model, then turn the logits into
    ``{"label", "score"}`` dicts via sigmoid / softmax / identity.

    NOTE(review): this file looks machine-obfuscated — several methods share
    the name `_UpperCamelCase` (later defs shadow earlier ones), local
    assignment targets were replaced by `lowerCamelCase_ : <type>` so later
    reads reference unbound names, and some signatures repeat the parameter
    name `a_`, which is a SyntaxError.  Comments below describe apparent
    original intent — confirm against upstream transformers before relying on
    them.
    """

    # NOTE(review): obfuscated attribute names — originally presumably
    # `return_all_scores = False` and `function_to_apply = ClassificationFunction.NONE`.
    __UpperCAmelCase : List[str] = False
    __UpperCAmelCase : Any = ClassificationFunction.NONE

    def __init__( self , **a_ ):
        # Restrict usable models to sequence-classification heads for the
        # active framework (TF vs PyTorch).
        super().__init__(**a_ )
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )

    def _UpperCamelCase ( self , a_=None , a_=None , a_="" , **a_ ):
        # _sanitize_parameters(return_all_scores, function_to_apply, top_k, **tokenizer_kwargs)
        # NOTE(review): duplicated `a_` parameters are a SyntaxError; the body
        # reads the original names (`tokenizer_kwargs`, `return_all_scores`, ...).
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        lowerCamelCase_ : Dict = tokenizer_kwargs
        lowerCamelCase_ : Dict = {}
        if hasattr(self.model.config , "return_all_scores" ) and return_all_scores is None:
            # The model config may carry a legacy `return_all_scores` default.
            lowerCamelCase_ : Tuple = self.model.config.return_all_scores
        if isinstance(a_ , a_ ) or top_k is None:
            # An explicit `top_k` disables the legacy single-dict output format.
            lowerCamelCase_ : Union[str, Any] = top_k
            lowerCamelCase_ : Optional[Any] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." , a_ , )
            if return_all_scores:
                lowerCamelCase_ : Any = None
            else:
                lowerCamelCase_ : Optional[int] = 1
        if isinstance(a_ , a_ ):
            # Allow passing the post-processing function by name.
            lowerCamelCase_ : Union[str, Any] = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            lowerCamelCase_ : str = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__( self , *a_ , **a_ ):
        # Run the base pipeline; in legacy mode a single input still yields a list.
        lowerCamelCase_ : int = super().__call__(*a_ , **a_ )
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        lowerCamelCase_ : Optional[Any] = "top_k" not in kwargs
        if isinstance(args[0] , a_ ) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def _UpperCamelCase ( self , a_ , **a_ ):
        # preprocess: tokenize a str / dict / legacy [[text, text_pair]] input.
        lowerCamelCase_ : str = self.framework
        if isinstance(a_ , a_ ):
            return self.tokenizer(**a_ , return_tensors=a_ , **a_ )
        elif isinstance(a_ , a_ ) and len(a_ ) == 1 and isinstance(inputs[0] , a_ ) and len(inputs[0] ) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=a_ , **a_ )
        elif isinstance(a_ , a_ ):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." )
        return self.tokenizer(a_ , return_tensors=a_ , **a_ )

    def _UpperCamelCase ( self , a_ ):
        # _forward: run the model on the tokenized inputs.
        return self.model(**a_ )

    def _UpperCamelCase ( self , a_ , a_=None , a_=1 , a_=True ):
        # postprocess(model_outputs, function_to_apply=None, top_k=1, _legacy=True)
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            # Pick sigmoid for multi-label / single-logit heads, softmax for
            # standard multi-class heads, else fall back to the config or NONE.
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                lowerCamelCase_ : Union[str, Any] = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                lowerCamelCase_ : str = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config , "function_to_apply" ) and function_to_apply is None:
                lowerCamelCase_ : Dict = self.model.config.function_to_apply
            else:
                lowerCamelCase_ : int = ClassificationFunction.NONE
        lowerCamelCase_ : Tuple = model_outputs["logits"][0]
        lowerCamelCase_ : List[Any] = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            lowerCamelCase_ : Dict = sigmoid(a_ )
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            lowerCamelCase_ : Optional[Any] = softmax(a_ )
        elif function_to_apply == ClassificationFunction.NONE:
            lowerCamelCase_ : int = outputs
        else:
            raise ValueError(F"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
        if top_k == 1 and _legacy:
            # NOTE(review): `idalabel` is presumably a mangled `id2label` —
            # confirm against `PretrainedConfig` before relying on it.
            return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
        lowerCamelCase_ : Tuple = [
            {"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(a_ )
        ]
        if not _legacy:
            # NOTE(review): the lambda's parameter is `a_` but its body reads `x`
            # — this would raise NameError; originally `key=lambda x: x["score"]`.
            dict_scores.sort(key=lambda a_ : x["score"] , reverse=a_ )
            if top_k is not None:
                lowerCamelCase_ : Any = dict_scores[:top_k]
        return dict_scores
| 73 |
def __magic_name__(number: float, digit_amount: int) -> float:
    """Isolate the fractional (decimal) part of *number*.

    Args:
        number: value whose decimal part is wanted.
        digit_amount: if > 0, round the fractional part to this many digits;
            otherwise return it unrounded.

    Returns:
        The fractional part of *number* (sign-preserving), optionally rounded.
    """
    # int() truncates toward zero, so the result keeps the sign of `number`.
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


# Restore the original public name, which the `__main__` demo below calls.
decimal_isolate = __magic_name__
if __name__ == "__main__":
    # Demo runs: the function under test is defined above as `__magic_name__`
    # (the name `decimal_isolate` used here originally no longer exists).
    print(__magic_name__(1.53, 0))
    print(__magic_name__(35.3_45, 1))
    print(__magic_name__(35.3_45, 2))
    print(__magic_name__(35.3_45, 3))
    print(__magic_name__(-14.7_89, 3))
    print(__magic_name__(0, 2))
    print(__magic_name__(-14.1_23, 1))
    print(__magic_name__(-14.1_23, 2))
    print(__magic_name__(-14.1_23, 3))
| 73 | 1 |
def _print_dist(dist, v):
    """Pretty-print the v x v shortest-path matrix; unreachable pairs print INF."""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        # Newline after each row of the matrix.
        print()


def __magic_name__(graph, v):
    """Run the Floyd-Warshall all-pairs shortest-path algorithm.

    Args:
        graph: v x v adjacency matrix; ``float("inf")`` marks a missing edge.
        v: number of vertices.

    Returns:
        Tuple ``(dist, v)`` where ``dist`` is the shortest-distance matrix.
    """
    # Start from a copy of the adjacency matrix so `graph` is not mutated.
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v


# Restore the original public name, which the `__main__` driver below calls.
floyd_warshall = __magic_name__
if __name__ == "__main__":
    # Interactive driver: build the adjacency matrix from stdin, then run
    # Floyd-Warshall (defined above as `__magic_name__`).
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        # The distance from a vertex to itself is always zero.
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
    __magic_name__(graph, v)
    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    # # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
| 73 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
    """Config holder for the image-processor tests below.

    Stores the knobs the tests read and exposes them as the kwargs dict the
    image processor under test is constructed from.  (Original parameter
    names restored from the attribute reads in the test methods; the
    obfuscated signature repeated `a_` for every parameter, a SyntaxError.)
    """

    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        # `size` falls back to the processor's default 18x18 resize target.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def _UpperCamelCase ( self ):
        """Return the kwargs used to instantiate the image processor."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
    """Tests for the LayoutLMv3 image processor (resize + optional OCR).

    NOTE(review): obfuscated file — every test method was renamed to the same
    `_UpperCamelCase` (later defs shadow earlier ones) and assignment targets
    were replaced by `lowerCamelCase_ : <type>`, so later lines read unbound
    names (`image_processor`, `encoding`, ...).  Comments describe apparent
    original intent.
    """

    # NOTE(review): originally presumably `image_processing_class`, which the
    # methods below read via `self.image_processing_class`.
    __UpperCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def _UpperCamelCase ( self ):
        # setUp: build the shared tester fixture.
        # NOTE(review): `LayoutLMvaImageProcessingTester` is not defined under
        # that name in this file (the tester class above is also `lowerCAmelCase__`).
        lowerCamelCase_ : List[str] = LayoutLMvaImageProcessingTester(self )

    @property
    def _UpperCamelCase ( self ):
        # Kwarg dict used to build the processor under test.
        return self.image_processor_tester.prepare_image_processor_dict()

    def _UpperCamelCase ( self ):
        # The processor must expose its three config knobs as attributes.
        lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(a_ , "do_resize" ) )
        self.assertTrue(hasattr(a_ , "size" ) )
        self.assertTrue(hasattr(a_ , "apply_ocr" ) )

    def _UpperCamelCase ( self ):
        # `from_dict` honours the default size and an explicit `size` override.
        lowerCamelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        lowerCamelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )

    def _UpperCamelCase ( self ):
        # Intentionally empty (placeholder kept for parity with the test mixin).
        pass
    def _UpperCamelCase ( self ):
        # PIL inputs: a single image gets batch dim 1; a list gets the full batch.
        # Initialize image_processing
        lowerCamelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
        for image in image_inputs:
            self.assertIsInstance(a_ , Image.Image )
        # Test not batched input
        lowerCamelCase_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # OCR output (words/boxes) should accompany the pixel values.
        self.assertIsInstance(encoding.words , a_ )
        self.assertIsInstance(encoding.boxes , a_ )
        # Test batched
        lowerCamelCase_ : int = image_processing(a_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def _UpperCamelCase ( self ):
        # Same shape checks as the PIL test, but with numpy-array inputs.
        # Initialize image_processing
        lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
        for image in image_inputs:
            self.assertIsInstance(a_ , np.ndarray )
        # Test not batched input
        lowerCamelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        lowerCamelCase_ : Any = image_processing(a_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def _UpperCamelCase ( self ):
        # Same shape checks again, but with PyTorch-tensor inputs.
        # Initialize image_processing
        lowerCamelCase_ : str = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
        for image in image_inputs:
            self.assertIsInstance(a_ , torch.Tensor )
        # Test not batched input
        lowerCamelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        lowerCamelCase_ : Union[str, Any] = image_processing(a_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def _UpperCamelCase ( self ):
        # OCR integration test against a real DocVQA fixture image: with
        # apply_ocr=True the processor must return Tesseract words/boxes; with
        # apply_ocr=False it returns pixel values only.
        # with apply_OCR = True
        lowerCamelCase_ : Any = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        lowerCamelCase_ : Optional[Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
        lowerCamelCase_ : Optional[Any] = Image.open(ds[0]["file"] ).convert("RGB" )
        lowerCamelCase_ : List[Any] = image_processing(a_ , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        lowerCamelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
        lowerCamelCase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , a_ )
        self.assertListEqual(encoding.boxes , a_ )
        # with apply_OCR = False
        lowerCamelCase_ : List[str] = LayoutLMvaImageProcessor(apply_ocr=a_ )
        lowerCamelCase_ : List[str] = image_processing(a_ , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 73 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
    """XLNet tokenizer tests against the SentencePiece fixture vocab.

    NOTE(review): obfuscated file — all four class attributes share the name
    `__UpperCAmelCase` (later assignments shadow earlier ones; originally
    presumably `tokenizer_class`, `rust_tokenizer_class`, `test_rust_tokenizer`,
    `test_sentencepiece`), every test method is `_UpperCamelCase`, and local
    assignment targets were replaced by `lowerCamelCase_ : <type>`.
    """
    __UpperCAmelCase : int = XLNetTokenizer
    __UpperCAmelCase : Optional[Any] = XLNetTokenizerFast
    __UpperCAmelCase : List[Any] = True
    __UpperCAmelCase : Tuple = True

    def _UpperCamelCase ( self ):
        # setUp: build a slow tokenizer from the SentencePiece fixture and save
        # it so helpers can reload it from tmpdirname.
        super().setUp()
        # We have a SentencePiece fixture for testing
        # NOTE(review): `a_` is unbound here — originally presumably SAMPLE_VOCAB.
        lowerCamelCase_ : str = XLNetTokenizer(a_ , keep_accents=a_ )
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname )

    def _UpperCamelCase ( self ):
        # "<s>" must map to id 1 and back.
        lowerCamelCase_ : int = "<s>"
        lowerCamelCase_ : List[str] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )

    def _UpperCamelCase ( self ):
        # Vocab sanity: first/last tokens and total size of the fixture vocab.
        lowerCamelCase_ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "<eod>" )
        self.assertEqual(len(a_ ) , 1006 )

    def _UpperCamelCase ( self ):
        # Base vocab size (without added special tokens).
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def _UpperCamelCase ( self ):
        # Full round-trip on the fixture vocab with keep_accents: tokenize,
        # convert to ids (OOV pieces -> 0 / "<unk>"), and convert ids back.
        lowerCamelCase_ : Optional[Any] = XLNetTokenizer(a_ , keep_accents=a_ )
        lowerCamelCase_ : Optional[Any] = tokenizer.tokenize("This is a test" )
        self.assertListEqual(a_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [285, 46, 10, 170, 382] )
        lowerCamelCase_ : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            a_ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        lowerCamelCase_ : Optional[int] = tokenizer.convert_tokens_to_ids(a_ )
        self.assertListEqual(a_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
        lowerCamelCase_ : Any = tokenizer.convert_ids_to_tokens(a_ )
        self.assertListEqual(
            a_ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )
    def _UpperCamelCase ( self ):
        # do_lower_case=True: text is lower-cased and accents are stripped
        # ("falsé" -> "se", "H\u00E9llo" -> "hello" pieces).
        lowerCamelCase_ : Dict = XLNetTokenizer(a_ , do_lower_case=a_ )
        lowerCamelCase_ : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            a_ , [
                SPIECE_UNDERLINE + "",
                "i",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ] , )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] )

    def _UpperCamelCase ( self ):
        # do_lower_case=False: casing preserved, accents still normalized away.
        lowerCamelCase_ : Dict = XLNetTokenizer(a_ , do_lower_case=a_ )
        lowerCamelCase_ : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            a_ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ] , )
    @slow
    def _UpperCamelCase ( self ):
        # Special-token assembly: XLNet appends <sep>=4 and <cls>=3 at the end
        # of a single sequence, and <sep> between the members of a pair.
        lowerCamelCase_ : str = XLNetTokenizer.from_pretrained("xlnet-base-cased" )
        lowerCamelCase_ : str = tokenizer.encode("sequence builders" , add_special_tokens=a_ )
        lowerCamelCase_ : Any = tokenizer.encode("multi-sequence build" , add_special_tokens=a_ )
        lowerCamelCase_ : int = tokenizer.build_inputs_with_special_tokens(a_ )
        lowerCamelCase_ : int = tokenizer.build_inputs_with_special_tokens(a_ , a_ )
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_a + [4, 3]

    @slow
    def _UpperCamelCase ( self ):
        # Golden-output integration test pinned to a specific checkpoint revision.
        # fmt: off
        lowerCamelCase_ : Any = {"input_ids": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a_ , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
| 73 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
__magic_name__ = logging.get_logger(__name__)

# Canonical pretrained checkpoints and their remote config files.
# NOTE(review): this rebinds the same obfuscated name as the logger above,
# clobbering it — originally two distinct names (logger / archive map).
__magic_name__ = {
    '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
    '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration for a LUKE model (word + entity embeddings).

    Stores the hyper-parameters read by the model implementation; defaults
    correspond to the base architecture.  (Original parameter names restored
    from the right-hand sides of the constructor body; the obfuscated
    signature repeated `a_` for every parameter, a SyntaxError, and the
    assignments had lost their `self.*` targets.)
    """

    # NOTE(review): obfuscated attribute name — originally presumably `model_type`.
    __UpperCAmelCase : List[Any] = '''luke'''

    def __init__(
        self,
        vocab_size=5_0267,
        entity_vocab_size=50_0000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        # Special-token ids go to the base config; everything else is stored
        # verbatim on the instance for the model to read.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 73 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure: submodule name -> public symbols it provides.
__magic_name__ = {
    '''configuration_efficientnet''': [
        '''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''EfficientNetConfig''',
        '''EfficientNetOnnxConfig''',
    ]
}

# Register the optional submodules only when their backends are installed.
# The dict is extended in place (previously each branch rebound the same
# name, clobbering the import structure built so far).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__['''image_processing_efficientnet'''] = ['''EfficientNetImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__['''modeling_efficientnet'''] = [
        '''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''EfficientNetForImageClassification''',
        '''EfficientNetModel''',
        '''EfficientNetPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers import the real symbols eagerly.
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy loader built from the
    # import-structure dict (previously passed as the undefined name
    # `_import_structure`, and the result was never installed in sys.modules).
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], __magic_name__)
| 73 |
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__magic_name__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class lowerCAmelCase__ ( datasets.BuilderConfig ):
    """BuilderConfig for the Spark-backed dataset builder.

    NOTE(review): the field name is obfuscated — originally presumably
    `features` (optional `datasets.Features` schema for produced examples),
    which the builder reads as `self.config.features`.
    """
    __UpperCAmelCase : Optional[datasets.Features] = None
def __magic_name__(df, partition_order):
    """Build a generator function yielding ``(key, example_dict)`` pairs from a
    Spark DataFrame, one partition at a time, in the given partition order.

    Args:
        df: a ``pyspark.sql.DataFrame``.
        partition_order: iterable of Spark partition ids to emit, in order.

    Returns:
        A zero-argument generator function.
    """
    import pyspark

    def generate_fn():
        # Tag every row with its Spark partition id so partitions can be
        # filtered out and emitted deterministically in `partition_order`.
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                # Key is "<partition>_<row-index>", value is the row as a dict.
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


# Restore the name the examples-iterable class below actually calls.
_generate_iterable_examples = __magic_name__
class lowerCAmelCase__ ( _BaseExamplesIterable ):
    """Examples iterable over a Spark DataFrame, emitted partition by partition.

    NOTE(review): obfuscated — the two `_UpperCamelCase` methods were distinct
    (presumably `shuffle_data_sources` / `shard_data_sources`; the second
    shadows the first), `SparkExamplesIterable` is not defined under that name
    in this file (this class is `lowerCAmelCase__`), and assignment targets
    were lost, so names like `self.df`, `self.partition_order` and `generator`
    are read but never bound.
    """

    def __init__( self , a_ , a_=None , ):
        # Originally: store the DataFrame and partition order, then build the
        # example generator via `_generate_iterable_examples`.
        lowerCamelCase_ : Dict = df
        lowerCamelCase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
        lowerCamelCase_ : int = _generate_iterable_examples(self.df , self.partition_order )

    def __iter__( self ):
        # Delegate to the generator built in __init__.
        yield from self.generate_examples_fn()

    def _UpperCamelCase ( self , a_ ):
        # Shuffle: emit partitions in a random order drawn from `generator`.
        lowerCamelCase_ : Optional[Any] = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(a_ )
        return SparkExamplesIterable(self.df , partition_order=a_ )

    def _UpperCamelCase ( self , a_ , a_ ):
        # Shard: keep only the partitions assigned to this worker.
        lowerCamelCase_ : Dict = self.split_shard_indices_by_worker(a_ , a_ )
        return SparkExamplesIterable(self.df , partition_order=a_ )

    @property
    def _UpperCamelCase ( self ):
        # Number of shards == number of partitions still to iterate.
        return len(self.partition_order )
class lowerCAmelCase__ ( datasets.DatasetBuilder ):
    """DatasetBuilder that materializes a Spark DataFrame as a HF dataset.

    NOTE(review): obfuscated — `__UpperCAmelCase` was presumably
    `BUILDER_CONFIG_CLASS`, the `_UpperCamelCase` methods were distinct
    builder hooks (`_validate_cache_dir`, `_info`, `_split_generators`, ...),
    and local assignment targets were lost, so names like `df`, `working_dir`
    and `probe` are read but never bound.  (The class continues beyond this
    span.)
    """

    # NOTE(review): `SparkConfig` is not defined under that name here — the
    # BuilderConfig dataclass above is also named `lowerCAmelCase__`.
    __UpperCAmelCase : Any = SparkConfig

    def __init__( self , a_ , a_ = None , a_ = None , **a_ , ):
        import pyspark

        # Originally: cache the active SparkSession, the source DataFrame and
        # the optional working dir before DatasetBuilder.__init__ runs; the
        # config name is derived from the DataFrame's semantic hash.
        lowerCamelCase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
        lowerCamelCase_ : Optional[Any] = df
        lowerCamelCase_ : List[Any] = working_dir
        super().__init__(
            cache_dir=a_ , config_name=str(self.df.semanticHash() ) , **a_ , )

    def _UpperCamelCase ( self ):
        # _validate_cache_dir: on multi-node clusters every worker must see the
        # same cache_dir (e.g. via NFS); probe by writing a file from a task.
        # Returns the path of the created file.
        def create_cache_and_write_probe(a_ ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=a_ )
            # NOTE(review): `uuid.uuida()` is presumably a mangled `uuid.uuid4()`.
            lowerCamelCase_ : Optional[Any] = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(a_ , "a" )
            return [probe_file]

        # Single-node "local" masters always share the filesystem — nothing to check.
        if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            lowerCamelCase_ : List[str] = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(a_ ).collect()
            )
            if os.path.isfile(probe[0] ):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )

    def _UpperCamelCase ( self ):
        # _info: dataset metadata; features come from the builder config.
        return datasets.DatasetInfo(features=self.config.features )

    def _UpperCamelCase ( self , a_ ):
        # _split_generators: a single TRAIN split backed by the DataFrame.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _UpperCamelCase ( self , a_ ):
import pyspark
def get_arrow_batch_size(a_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
lowerCamelCase_ : str = self.df.count()
lowerCamelCase_ : List[Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowerCamelCase_ : Any = (
self.df.limit(a_ )
.repartition(1 )
.mapInArrow(a_ , "batch_bytes: long" )
.agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowerCamelCase_ : int = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowerCamelCase_ : Union[str, Any] = min(a_ , int(approx_total_size / max_shard_size ) )
lowerCamelCase_ : int = self.df.repartition(a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ , ):
import pyspark
lowerCamelCase_ : str = ParquetWriter if file_format == "parquet" else ArrowWriter
lowerCamelCase_ : int = os.path.join(self._working_dir , os.path.basename(a_ ) ) if self._working_dir else fpath
lowerCamelCase_ : Optional[Any] = file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowerCamelCase_ : int = self.config.features
lowerCamelCase_ : Any = self._writer_batch_size
lowerCamelCase_ : Tuple = self._fs.storage_options
def write_arrow(a_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowerCamelCase_ : List[Any] = pyspark.TaskContext().taskAttemptId()
lowerCamelCase_ : Optional[int] = next(a_ , a_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
lowerCamelCase_ : List[Any] = 0
lowerCamelCase_ : Optional[int] = writer_class(
features=a_ , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
lowerCamelCase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(a_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowerCamelCase_ ,lowerCamelCase_ : List[str] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
lowerCamelCase_ : List[str] = writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
lowerCamelCase_ : Optional[int] = pa.Table.from_batches([batch] )
writer.write_table(a_ )
if writer._num_bytes > 0:
lowerCamelCase_ ,lowerCamelCase_ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(a_ ) ):
lowerCamelCase_ : str = os.path.join(os.path.dirname(a_ ) , os.path.basename(a_ ) )
shutil.move(a_ , a_ )
lowerCamelCase_ : int = (
self.df.mapInArrow(a_ , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _UpperCamelCase ( self , a_ , a_ = "arrow" , a_ = None , a_ = None , **a_ , ):
self._validate_cache_dir()
lowerCamelCase_ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(a_ )
lowerCamelCase_ : Dict = not is_remote_filesystem(self._fs )
lowerCamelCase_ : List[str] = os.path.join if is_local else posixpath.join
lowerCamelCase_ : Any = "-TTTTT-SSSSS-of-NNNNN"
lowerCamelCase_ : List[Any] = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
lowerCamelCase_ : int = path_join(self._output_dir , a_ )
lowerCamelCase_ : int = 0
lowerCamelCase_ : Optional[Any] = 0
lowerCamelCase_ : int = 0
lowerCamelCase_ : Dict = []
lowerCamelCase_ : Any = []
for task_id, content in self._prepare_split_single(a_ , a_ , a_ ):
(
(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,
) : Tuple = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(a_ )
lowerCamelCase_ : Dict = total_num_examples
lowerCamelCase_ : Any = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
lowerCamelCase_ : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowerCamelCase_ : Any = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
a_ , a_ , a_ , ):
rename(
a_ , fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace("TTTTT-SSSSS" , F"""{global_shard_id:05d}""" ).replace("NNNNN" , F"""{total_shards:05d}""" ) , )
lowerCamelCase_ : Optional[int] = []
lowerCamelCase_ : Dict = 0
for i in range(len(a_ ) ):
lowerCamelCase_ ,lowerCamelCase_ : Tuple = task_id_and_num_shards[i]
for shard_id in range(a_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(a_ , len(a_ ) ).map(lambda a_ : _rename_shard(*a_ ) ).collect()
else:
# don't use any pattern
lowerCamelCase_ : int = 0
lowerCamelCase_ : Optional[int] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace(a_ , "" ) , )
def _UpperCamelCase ( self , a_ , ):
return SparkExamplesIterable(self.df )
# --- extraction artifact (dataset table separator) removed; original text: "| 73 | 1 |" ---
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
# Prefer the fast (Rust-backed) tokenizer; fall back to the slow one when the
# `tokenizers` backend is missing.
try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        '''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
    )
# NOTE(review): the three module-level assignments below all target the same
# mangled name ``__magic_name__`` — each overwrites the previous one. Upstream
# these were three distinct names (the fast-tokenizer fallback, the per-size
# intermediate sizes, and the per-size shard counts). Confirm before use.
__magic_name__ = None
# FFN intermediate size per model size (matches compute_intermediate_size below).
__magic_name__ = {
    '''7B''': 1_1_0_0_8,
    '''13B''': 1_3_8_2_4,
    '''30B''': 1_7_9_2_0,
    '''65B''': 2_2_0_1_6,
    '''70B''': 2_8_6_7_2,
}
# Number of checkpoint shards per model size.
__magic_name__ = {
    '''7B''': 1,
    '''7Bf''': 1,
    '''13B''': 2,
    '''13Bf''': 2,
    '''30B''': 4,
    '''65B''': 8,
    '''70B''': 8,
    '''70Bf''': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    '''Return the LLaMA feed-forward intermediate size for hidden size *n*.

    The reference implementation takes 2/3 of 4*n, scales it by
    *ffn_dim_multiplier*, then rounds up to the nearest multiple of
    *multiple_of*. E.g. n=4096 -> 11008 (7B), n=5120 -> 13824 (13B).

    Fixes vs. original: the three parameters all shared one mangled name
    (a SyntaxError) while the body read ``n``/``ffn_dim_multiplier``/
    ``multiple_of``; the function name matches its call site in write_model.
    '''
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
def read_json(path):
    '''Load and return the JSON content of the file at *path*.

    Fixes vs. original: ``json.load`` was given the path string instead of the
    open file handle (which raises at runtime); the function name matches its
    call site in write_model ("params.json").
    '''
    with open(path, "r") as f:
        return json.load(f)
def write_json(text, path):
    '''Serialize *text* as JSON into the file at *path*.

    Fixes vs. original: the two parameters shared one mangled name (a
    SyntaxError) and ``json.dump`` was given the same object twice instead of
    (payload, file handle); the name matches its call site in write_model
    ("pytorch_model.bin.index.json").
    '''
    with open(path, "w") as f:
        json.dump(text, f)
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True):
    '''Convert reference-format LLaMA weights into a Hugging Face ``LlamaForCausalLM`` checkpoint.

    Parameters appear to be (model_path, input_base_path, model_size,
    safe_serialization) — TODO confirm; as written the duplicated parameter
    name ``lowerCAmelCase_`` cannot compile.

    NOTE(review): throughout this function, values are bound to the throwaway
    local ``lowerCamelCase_`` while later statements read the intended names
    (``params``, ``n_layers``, ``loaded``, ``state_dict`` ...). This looks like
    mechanical name mangling of the upstream conversion script — restore real
    names before running.
    '''
    os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_)
    lowerCamelCase_ : Optional[int] = os.path.join(lowerCAmelCase_ , "tmp")
    os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_)
    # Hyperparameters shipped alongside the reference checkpoint.
    lowerCamelCase_ : Tuple = read_json(os.path.join(lowerCAmelCase_ , "params.json"))
    lowerCamelCase_ : str = NUM_SHARDS[model_size]
    lowerCamelCase_ : Dict = params["n_layers"]
    lowerCamelCase_ : List[str] = params["n_heads"]
    lowerCamelCase_ : Dict = n_heads // num_shards
    lowerCamelCase_ : List[str] = params["dim"]
    lowerCamelCase_ : str = dim // n_heads
    # Rotary-embedding inverse frequencies (RoPE base 10000).
    lowerCamelCase_ : Optional[int] = 1_00_00.0
    lowerCamelCase_ : str = 1.0 / (base ** (torch.arange(0 , lowerCAmelCase_ , 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        lowerCamelCase_ : int = params["n_kv_heads"] # for GQA / MQA
        lowerCamelCase_ : Dict = n_heads_per_shard // num_key_value_heads
        lowerCamelCase_ : Tuple = dim // num_key_value_heads
    else: # compatibility with other checkpoints
        lowerCamelCase_ : List[str] = n_heads
        lowerCamelCase_ : Optional[int] = n_heads_per_shard
        lowerCamelCase_ : List[Any] = dim
    # permute for sliced rotary
    def permute(lowerCAmelCase_ , lowerCAmelCase_=n_heads , lowerCAmelCase_=dim , lowerCAmelCase_=dim):
        # NOTE(review): body reads ``w``/``dima`` which are not bound by the
        # mangled signature — confirm against upstream (w, n_heads, dim1, dim2).
        return w.view(lowerCAmelCase_ , dima // n_heads // 2 , 2 , lowerCAmelCase_).transpose(1 , 2).reshape(lowerCAmelCase_ , lowerCAmelCase_)
    print(F"""Fetching all parameters from the checkpoint at {input_base_path}.""")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        lowerCamelCase_ : str = torch.load(os.path.join(lowerCAmelCase_ , "consolidated.00.pth") , map_location="cpu")
    else:
        # Sharded
        lowerCamelCase_ : Tuple = [
            torch.load(os.path.join(lowerCAmelCase_ , F"""consolidated.{i:02d}.pth""") , map_location="cpu")
            for i in range(lowerCAmelCase_)
        ]
    lowerCamelCase_ : Any = 0
    lowerCamelCase_ : Union[str, Any] = {"weight_map": {}}
    # One output file per transformer layer; embeddings/norm/head go in a final file below.
    for layer_i in range(lowerCAmelCase_):
        lowerCamelCase_ : str = F"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"""
        if model_size == "7B":
            # Unsharded
            lowerCamelCase_ : Optional[Any] = {
                F"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
                    loaded[F"""layers.{layer_i}.attention.wq.weight"""]),
                F"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
                    loaded[F"""layers.{layer_i}.attention.wk.weight"""]),
                F"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[F"""layers.{layer_i}.attention.wv.weight"""],
                F"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[F"""layers.{layer_i}.attention.wo.weight"""],
                F"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w1.weight"""],
                F"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w2.weight"""],
                F"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w3.weight"""],
                F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[F"""layers.{layer_i}.attention_norm.weight"""],
                F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[F"""layers.{layer_i}.ffn_norm.weight"""],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            lowerCamelCase_ : List[str] = {
                F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
                    F"""layers.{layer_i}.attention_norm.weight"""
                ].clone(),
                F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
                    F"""layers.{layer_i}.ffn_norm.weight"""
                ].clone(),
            }
            # Stitch the per-shard attention/MLP weights back together along the
            # dimension each projection was split on.
            lowerCamelCase_ : str = permute(
                torch.cat(
                    [
                        loaded[i][F"""layers.{layer_i}.attention.wq.weight"""].view(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
                        for i in range(lowerCAmelCase_)
                    ] , dim=0 , ).reshape(lowerCAmelCase_ , lowerCAmelCase_))
            lowerCamelCase_ : Tuple = permute(
                torch.cat(
                    [
                        loaded[i][F"""layers.{layer_i}.attention.wk.weight"""].view(
                            lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
                        for i in range(lowerCAmelCase_)
                    ] , dim=0 , ).reshape(lowerCAmelCase_ , lowerCAmelCase_) , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
            lowerCamelCase_ : Dict = torch.cat(
                [
                    loaded[i][F"""layers.{layer_i}.attention.wv.weight"""].view(
                        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
                    for i in range(lowerCAmelCase_)
                ] , dim=0 , ).reshape(lowerCAmelCase_ , lowerCAmelCase_)
            lowerCamelCase_ : Optional[int] = torch.cat(
                [loaded[i][F"""layers.{layer_i}.attention.wo.weight"""] for i in range(lowerCAmelCase_)] , dim=1)
            lowerCamelCase_ : Dict = torch.cat(
                [loaded[i][F"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(lowerCAmelCase_)] , dim=0)
            lowerCamelCase_ : Union[str, Any] = torch.cat(
                [loaded[i][F"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(lowerCAmelCase_)] , dim=1)
            lowerCamelCase_ : Dict = torch.cat(
                [loaded[i][F"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(lowerCAmelCase_)] , dim=0)
        lowerCamelCase_ : Optional[Any] = inv_freq
        # Record where each tensor lives in the sharded index and persist the layer.
        for k, v in state_dict.items():
            lowerCamelCase_ : List[Any] = filename
            param_count += v.numel()
        torch.save(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , lowerCAmelCase_))
    lowerCamelCase_ : Dict = F"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
    if model_size == "7B":
        # Unsharded
        lowerCamelCase_ : Dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        lowerCamelCase_ : Any = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(lowerCAmelCase_)] , dim=1),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(lowerCAmelCase_)] , dim=0),
        }
    for k, v in state_dict.items():
        lowerCamelCase_ : str = filename
        param_count += v.numel()
    torch.save(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , lowerCAmelCase_))
    # Write configs
    # total_size assumes 2 bytes per parameter (fp16/bf16 checkpoints).
    lowerCamelCase_ : str = {"total_size": param_count * 2}
    write_json(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , "pytorch_model.bin.index.json"))
    lowerCamelCase_ : Union[str, Any] = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    lowerCamelCase_ : Any = params["multiple_of"] if "multiple_of" in params else 256
    lowerCamelCase_ : Optional[int] = LlamaConfig(
        hidden_size=lowerCAmelCase_ , intermediate_size=compute_intermediate_size(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) , num_attention_heads=params["n_heads"] , num_hidden_layers=params["n_layers"] , rms_norm_eps=params["norm_eps"] , num_key_value_heads=lowerCAmelCase_ , )
    config.save_pretrained(lowerCAmelCase_)
    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()
    print("Loading the checkpoint in a Llama model.")
    lowerCamelCase_ : List[Any] = LlamaForCausalLM.from_pretrained(lowerCAmelCase_ , torch_dtype=torch.floataa , low_cpu_mem_usage=lowerCAmelCase_)
    # Avoid saving this as part of the config.
    del model.config._name_or_path
    print("Saving in the Transformers format.")
    model.save_pretrained(lowerCAmelCase_ , safe_serialization=lowerCAmelCase_)
    # Remove the temporary per-layer files once the final save succeeded.
    shutil.rmtree(lowerCAmelCase_)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    '''Build a LLaMA tokenizer from *input_tokenizer_path* and save it to *tokenizer_path*.

    Prefers the fast (Rust-backed) tokenizer; falls back to the slow one when
    ``LlamaTokenizerFast`` could not be imported (see the module-level fallback).

    Fixes vs. original: the two parameters shared one mangled name (a
    SyntaxError) and the locals were bound to throwaway names; the function
    name matches its call site in ``main``.
    '''
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(F"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    '''CLI entry point: convert LLaMA weights and/or the tokenizer to the HF format.

    Fixes vs. original: the ``parse_args()`` result and the tokenizer path were
    bound to throwaway locals while the following lines read ``args`` (a
    NameError at runtime), and ``--safe_serialization`` used a mangled ``type=``
    argument; the function name matches the ``__main__`` guard below.
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir" , help="Location of LLaMA weights, which contains tokenizer.model and model folders" , )
    parser.add_argument(
        "--model_size" , choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"] , )
    parser.add_argument(
        "--output_dir" , help="Location to write HF model and tokenizer" , )
    parser.add_argument("--safe_serialization" , type=bool , help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
    # The SentencePiece model always lives next to the weight folders.
    spm_path = os.path.join(args.input_dir , "tokenizer.model")
    write_tokenizer(args.output_dir , spm_path)


if __name__ == "__main__":
    main()
# --- extraction artifact (dataset table separator) removed; original text: "| 73 |" ---
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph,
    v,
    visited_forward,
    visited_backward,
    cst_fwd,
    cst_bwd,
    queue,
    parent,
    shortest_distance,
):
    '''Relax every outgoing edge of *v* for one direction of bidirectional Dijkstra.

    Updates ``cst_fwd``/``parent`` and pushes improved nodes onto *queue*; when a
    neighbor was already settled by the opposite search, the candidate path
    through it may tighten *shortest_distance*.

    Returns the (possibly improved) shortest distance found so far.

    Fixes vs. original: all nine parameters shared one mangled name (a
    SyntaxError) and the relaxation results were bound to throwaway locals; the
    function name matches its call sites in the bidirectional driver below.
    '''
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # Meeting point with the opposite search: try to tighten the best path.
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source, destination, graph_forward, graph_backward):
    '''Bidirectional Dijkstra: shortest distance from *source* to *destination*.

    Runs one Dijkstra search from each end (on the forward and reversed graphs)
    and stops when the two frontiers can no longer improve on the best meeting
    point. Returns the shortest distance, or -1 when no path exists.

    Fixes vs. original: the four parameters shared one mangled name (a
    SyntaxError) and every local — including both halves of the priority-queue
    pops — was bound to the same throwaway name, clobbering the state the loop
    then read.
    '''
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )
        # Termination: once the two frontiers' costs exceed the best meeting
        # point, no shorter path can be found.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Forward adjacency list: node -> [[neighbor, edge_weight], ...].
# NOTE(review): both graphs below are bound to the same mangled name
# ``__magic_name__`` — the second assignment overwrites the first. Upstream
# these were two distinct names (forward and backward graphs for the
# bidirectional search). Confirm before use.
__magic_name__ = {
    '''B''': [['''C''', 1]],
    '''C''': [['''D''', 1]],
    '''D''': [['''F''', 1]],
    '''E''': [['''B''', 1], ['''G''', 2]],
    '''F''': [],
    '''G''': [['''F''', 1]],
}
# Backward (reversed-edge) adjacency list for the bidirectional search.
__magic_name__ = {
    '''B''': [['''E''', 1]],
    '''C''': [['''B''', 1]],
    '''D''': [['''C''', 1]],
    '''F''': [['''D''', 1], ['''G''', 1]],
    '''E''': [[None, np.inf]],
    '''G''': [['''E''', 2]],
}
if __name__ == "__main__":
    import doctest

    doctest.testmod()
# --- extraction artifact (dataset table separator) removed; original text: "| 73 | 1 |" ---
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
    """Tokenization tests for the Salesforce CodeGen tokenizer (slow and fast variants).

    NOTE(review): every test method below was emitted under the single mangled
    name ``_UpperCamelCase`` — as written, each definition shadows the previous
    one and unittest cannot discover them (no ``test_*`` prefix). Restore the
    original method names before running.
    """
    # Tokenizer classes under test and mixin configuration.
    __UpperCAmelCase : Tuple = CodeGenTokenizer
    __UpperCAmelCase : Tuple = CodeGenTokenizerFast
    __UpperCAmelCase : int = True
    __UpperCAmelCase : Optional[Any] = {'''add_prefix_space''': True}
    __UpperCAmelCase : Union[str, Any] = False
    # Build a tiny BPE vocab/merges fixture on disk for the tests below.
    def _UpperCamelCase ( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowerCamelCase_ : List[str] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        lowerCamelCase_ : Dict = dict(zip(a_ , range(len(a_ ) ) ) )
        lowerCamelCase_ : Optional[int] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        lowerCamelCase_ : str = {"unk_token": "<unk>"}
        lowerCamelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowerCamelCase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(a_ ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(a_ ) )
    # Factory for the slow tokenizer backed by the fixture above.
    def _UpperCamelCase ( self , **a_ ):
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizer.from_pretrained(self.tmpdirname , **a_ )
    # Factory for the fast (Rust) tokenizer backed by the fixture above.
    def _UpperCamelCase ( self , **a_ ):
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **a_ )
    # Sample (input, expected output) text pair for the common test mixin.
    def _UpperCamelCase ( self , a_ ):
        lowerCamelCase_ : Any = "lower newer"
        lowerCamelCase_ : Optional[Any] = "lower newer"
        return input_text, output_text
    # Slow tokenizer: tokenization and token->id conversion on the toy vocab.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Optional[Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        lowerCamelCase_ : List[Any] = "lower newer"
        lowerCamelCase_ : str = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        lowerCamelCase_ : str = tokenizer.tokenize(a_ , add_prefix_space=a_ )
        self.assertListEqual(a_ , a_ )
        lowerCamelCase_ : Dict = tokens + [tokenizer.unk_token]
        lowerCamelCase_ : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , a_ )
    # Slow vs. fast tokenizer parity: tokens, ids, special tokens, unknown token.
    def _UpperCamelCase ( self ):
        if not self.test_rust_tokenizer:
            return
        lowerCamelCase_ : Union[str, Any] = self.get_tokenizer()
        lowerCamelCase_ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=a_ )
        lowerCamelCase_ : Union[str, Any] = "lower newer"
        # Testing tokenization
        lowerCamelCase_ : Tuple = tokenizer.tokenize(a_ , add_prefix_space=a_ )
        lowerCamelCase_ : List[str] = rust_tokenizer.tokenize(a_ )
        self.assertListEqual(a_ , a_ )
        # Testing conversion to ids without special tokens
        lowerCamelCase_ : Any = tokenizer.encode(a_ , add_special_tokens=a_ , add_prefix_space=a_ )
        lowerCamelCase_ : Tuple = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
        self.assertListEqual(a_ , a_ )
        # Testing conversion to ids with special tokens
        lowerCamelCase_ : Any = self.get_rust_tokenizer(add_prefix_space=a_ )
        lowerCamelCase_ : Dict = tokenizer.encode(a_ , add_prefix_space=a_ )
        lowerCamelCase_ : str = rust_tokenizer.encode(a_ )
        self.assertListEqual(a_ , a_ )
        # Testing the unknown token
        lowerCamelCase_ : int = tokens + [rust_tokenizer.unk_token]
        lowerCamelCase_ : Any = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a_ ) , a_ )
    # Pretokenized-input test is deliberately skipped for this model family.
    def _UpperCamelCase ( self , *a_ , **a_ ):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    # Without a pad token configured, padding requests must raise.
    def _UpperCamelCase ( self , a_=15 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                lowerCamelCase_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
                # Simple input
                lowerCamelCase_ : Optional[Any] = "This is a simple input"
                lowerCamelCase_ : Tuple = ["This is a simple input 1", "This is a simple input 2"]
                lowerCamelCase_ : int = ("This is a simple input", "This is a pair")
                lowerCamelCase_ : List[Any] = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(a_ , tokenizer_r.encode , a_ , max_length=a_ , padding="max_length" )
                # Simple input
                self.assertRaises(a_ , tokenizer_r.encode_plus , a_ , max_length=a_ , padding="max_length" )
                # Simple input
                self.assertRaises(
                    a_ , tokenizer_r.batch_encode_plus , a_ , max_length=a_ , padding="max_length" , )
                # Pair input
                self.assertRaises(a_ , tokenizer_r.encode , a_ , max_length=a_ , padding="max_length" )
                # Pair input
                self.assertRaises(a_ , tokenizer_r.encode_plus , a_ , max_length=a_ , padding="max_length" )
                # Pair input
                self.assertRaises(
                    a_ , tokenizer_r.batch_encode_plus , a_ , max_length=a_ , padding="max_length" , )
    # With an explicit pad token: check max_length and automatic (longest) padding.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : int = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
        # Simple input
        lowerCamelCase_ : Dict = "This is a simple input"
        lowerCamelCase_ : List[Any] = ["This is a simple input looooooooong", "This is a simple input"]
        lowerCamelCase_ : Optional[int] = ("This is a simple input", "This is a pair")
        lowerCamelCase_ : Union[str, Any] = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        lowerCamelCase_ : Optional[Any] = tokenizer.pad_token_id
        lowerCamelCase_ : str = tokenizer(a_ , padding="max_length" , max_length=30 , return_tensors="np" )
        lowerCamelCase_ : Tuple = tokenizer(a_ , padding=a_ , truncate=a_ , return_tensors="np" )
        lowerCamelCase_ : Any = tokenizer(*a_ , padding="max_length" , max_length=60 , return_tensors="np" )
        lowerCamelCase_ : Optional[int] = tokenizer(a_ , padding=a_ , truncate=a_ , return_tensors="np" )
        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
        self.assertTrue(pad_token_id in out_s["input_ids"] )
        self.assertTrue(0 in out_s["attention_mask"] )
        # s2
        # test automatic padding
        self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
        self.assertFalse(0 in out_sa["attention_mask"][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
        self.assertTrue(0 in out_sa["attention_mask"][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
        self.assertTrue(pad_token_id in out_p["input_ids"] )
        self.assertTrue(0 in out_p["attention_mask"] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
        self.assertFalse(0 in out_pa["attention_mask"][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
        self.assertTrue(0 in out_pa["attention_mask"][1] )
    # A custom BOS token must be prepended to every encoding and survive decode.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Optional[Any] = "$$$"
        lowerCamelCase_ : Dict = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a_ , add_bos_token=a_ )
        lowerCamelCase_ : Optional[Any] = "This is a simple input"
        lowerCamelCase_ : List[Any] = ["This is a simple input 1", "This is a simple input 2"]
        lowerCamelCase_ : str = tokenizer.bos_token_id
        lowerCamelCase_ : Dict = tokenizer(a_ )
        lowerCamelCase_ : List[str] = tokenizer(a_ )
        self.assertEqual(out_s.input_ids[0] , a_ )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        lowerCamelCase_ : Union[str, Any] = tokenizer.decode(out_s.input_ids )
        lowerCamelCase_ : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , a_ )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
    # Decoding with truncate_before_pattern must cut at comment/docstring markers.
    @slow
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Any = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
        lowerCamelCase_ : Union[str, Any] = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        lowerCamelCase_ : str = "\nif len_a > len_b: result = a\nelse: result = b"
        lowerCamelCase_ : str = tokenizer.encode(a_ )
        lowerCamelCase_ : int = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
        lowerCamelCase_ : Optional[Any] = tokenizer.decode(a_ , truncate_before_pattern=a_ )
        self.assertEqual(a_ , a_ )
    # Intentionally empty override (placeholder from the common test mixin).
    def _UpperCamelCase ( self ):
        pass
# --- extraction artifact (dataset table separator) removed; original text: "| 73 |" ---
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both assignments below target the same mangled name
# ``__magic_name__`` — the URL map overwrites the logger. Upstream these were
# two distinct names (the module logger and the pretrained-config archive map).
__magic_name__ = logging.get_logger(__name__)
# Map of pretrained model ids to their config URLs.
__magic_name__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration for the CTRL model: stores the hyperparameters read by the model code.

    NOTE(review): the three class attributes below all share one mangled name
    (later assignments clobber earlier ones); upstream they are the model type,
    the keys ignored at inference, and the attribute map — confirm before use.

    Fixes vs. original: every ``__init__`` parameter was emitted as ``a_``
    (duplicated names, a SyntaxError) while the body read the real
    hyperparameter names; the signature is restored from the assignment order
    and the original defaults.
    """

    __UpperCAmelCase : Dict = '''ctrl'''
    __UpperCAmelCase : Dict = ['''past_key_values''']
    __UpperCAmelCase : int = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self,
        vocab_size=24_6534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1E-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        # Model dimensions.
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        # Regularization / initialization.
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        # Whether the model returns past key/values for fast decoding.
        self.use_cache = use_cache
        super().__init__(**kwargs)
# --- extraction artifact (dataset table separator) removed; original text: "| 73 | 1 |" ---
import heapq as hq
import math
from collections.abc import Iterator
class lowerCAmelCase__ :
    """A graph vertex for the Prim minimum-spanning-tree algorithms below.

    Fixes vs. original: ``__init__`` bound its state to a throwaway local
    instead of instance attributes, and both mutator methods were emitted under
    the single name ``_UpperCamelCase`` (the second — with duplicated parameter
    names, a SyntaxError — shadowed the first). The method names are restored
    from their call sites in the edge-connection helper below.
    """

    def __init__( self , id_ ):
        self.id = str(id_ )      # vertex identifier
        self.key = None          # best known edge weight into the growing MST
        self.pi = None           # predecessor vertex in the MST
        self.neighbors = []      # adjacent vertex objects
        self.edges = {}          # {neighbor_id: distance}

    def __lt__( self , other ):
        # Order vertices by their current key so min()/heapq pick the cheapest.
        return self.key < other.key

    def __repr__( self ):
        return self.id

    def add_neighbor( self , vertex ):
        """Record *vertex* as adjacent to this one."""
        self.neighbors.append(vertex )

    def add_edge( self , vertex , weight ):
        """Set the weight of the edge from this vertex to *vertex*."""
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    '''Add an undirected edge of weight *edge* between vertices *a* and *b*.

    *a* and *b* are 1-based indices into *graph* (a list of vertex objects).

    Fixes vs. original: the four parameters shared one mangled name
    (a SyntaxError); the body is unchanged.
    '''
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , edge)
    graph[b - 1].add_edge(graph[a - 1] , edge)
def prim(graph, root):
    '''Prim's MST (list-scan variant): return the tree edges as (child, parent) pairs.

    Vertices carry ``key``/``pi``/``neighbors``/``edges`` attributes and order
    themselves by ``key``; ids are 0-based strings, hence the ``+ 1`` in the
    returned pairs.

    Fixes vs. original: the two parameters shared one mangled name (a
    SyntaxError) and every per-vertex update (``u.key``, ``u.pi``, ``root.key``,
    the relaxations) was bound to a throwaway local instead of the attribute.
    '''
    a = []
    # Initialize every vertex as unreached.
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        # Extract the cheapest frontier vertex (linear scan).
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            # Relax: adopt u as parent when its edge beats v's current key.
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 , len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def prim_heap(graph, root):
    '''Prim's MST (heap variant): yield the tree edges as (child, parent) pairs.

    Same contract as ``prim`` above, but the frontier is kept in a binary heap;
    the heap is rebuilt after each key decrease since vertices mutate in place.

    Fixes vs. original: the two parameters shared one mangled name (a
    SyntaxError) and every per-vertex update was bound to a throwaway local
    instead of the attribute.
    '''
    # Initialize every vertex as unreached.
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            # Relax: adopt u as parent when its edge beats v's current key,
            # then restore the heap invariant after the in-place key change.
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1 , len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def __magic_name__ ( ):
    '''Placeholder hook -- no body beyond this docstring survives in this
    chunk; presumably a test/demo entry point in the original module.'''


# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 73 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
# Module-level logger for this training script.
__magic_name__ = logging.getLogger(__name__)
# NOTE(review): these module constants were all renamed to ``__magic_name__``
# by obfuscation, so each assignment shadows the previous one, and the last
# line's read of ``MODEL_CONFIG_CLASSES`` (presumably the list built on the
# line above) is undefined as written -- restore distinct names.
__magic_name__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__magic_name__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase__ :
    """Arguments controlling which model/config/tokenizer to fine-tune or
    train from scratch (consumed by ``HfArgumentParser``).

    Each ``field(...)`` carries its own ``help`` metadata describing the
    original option.  NOTE(review): obfuscation renamed every attribute to
    ``__UpperCAmelCase``, so later fields overwrite earlier ones at
    class-creation time -- restore distinct field names before using this
    dataclass.
    """

    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={
            '''help''': (
                '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
            )
        }, )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__lowerCamelCase )}, )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={
            '''help''': (
                '''Override some existing default config settings when a model is trained from scratch. Example: '''
                '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
            )
        }, )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
    __UpperCAmelCase : bool = field(
        default=__lowerCamelCase, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''}, )
    __UpperCAmelCase : str = field(
        default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, )
    __UpperCAmelCase : bool = field(
        default=__lowerCamelCase, metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        }, )

    def _UpperCamelCase ( self ):
        # Presumably ``__post_init__``: explicit config overrides are mutually
        # exclusive with loading an existing config or checkpoint.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path" )
@dataclass
class lowerCAmelCase__ :
    """Arguments describing the training/evaluation data and preprocessing
    (consumed by ``HfArgumentParser``).

    NOTE(review): all fields share the obfuscated name ``__UpperCAmelCase``,
    so later assignments overwrite earlier ones; the ``help`` metadata
    strings still document the original, distinct options -- restore
    distinct field names before use.
    """

    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    __UpperCAmelCase : Optional[str] = field(default=__lowerCamelCase, metadata={'''help''': '''The input training data file (a text file).'''} )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''}, )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''}, )
    __UpperCAmelCase : Optional[str] = field(
        default=__lowerCamelCase, metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''}, )
    __UpperCAmelCase : bool = field(
        default=__lowerCamelCase, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    __UpperCAmelCase : Optional[int] = field(
        default=5, metadata={
            '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
        }, )
    __UpperCAmelCase : Optional[int] = field(
        default=__lowerCamelCase, metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated. Default to the max input length of the model.'''
            )
        }, )
    __UpperCAmelCase : Optional[int] = field(
        default=__lowerCamelCase, metadata={'''help''': '''The number of processes to use for the preprocessing.'''}, )
    __UpperCAmelCase : float = field(
        default=0.15, metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
    __UpperCAmelCase : bool = field(
        default=__lowerCamelCase, metadata={
            '''help''': (
                '''Whether to pad all samples to `max_seq_length`. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
            )
        }, )

    def _UpperCamelCase ( self ):
        # Presumably ``__post_init__``: sanity-check that the data files have
        # a supported extension.
        if self.train_file is not None:
            lowerCamelCase_ : str = self.train_file.split("." )[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            lowerCamelCase_ : Union[str, Any] = self.validation_file.split("." )[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def __magic_name__ ( dataset , ref_file ):
    """Attach whole-word-mask reference data to ``dataset``.

    ``ref_file`` holds one JSON-encoded list per non-empty line (one line
    per dataset example).  Returns a new ``Dataset`` with the references
    stored under the ``"chinese_ref"`` column.

    Fixes: the original signature declared ``lowerCAmelCase_`` twice (a
    SyntaxError) and obfuscation dropped the bindings for ``refs`` and the
    column dict; restored.
    NOTE: the "chinese_ref" column name is taken from the upstream
    ``run_mlm_wwm`` script that this code mirrors.
    """
    with open(ref_file , "r" , encoding="utf-8") as f:
        # One JSON payload per non-blank line.
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs), "ref file must have exactly one line per dataset example"
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def __magic_name__ ( ):
    '''Drive whole-word-mask MLM training: parse arguments, set up logging
    and the seed, load datasets, build config/tokenizer/model, tokenize,
    then train and evaluate with ``Trainer``.

    NOTE(review): obfuscation replaced most assignment targets with
    ``lowerCamelCase_`` while later reads still use the original names
    (``parser``, ``training_args``, ``datasets``, ``config``,
    ``tokenizer``, ``model``, ``trainer``, ...), so this function does not
    run as written -- the original bindings must be restored.
    '''
    lowerCamelCase_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : str = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    lowerCamelCase_ : List[str] = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        lowerCamelCase_ : Dict = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                "Use --overwrite_output_dir to overcome.")
        elif last_checkpoint is not None:
            logger.info(
                F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
    # Log on each process the small summary:
    # NOTE(review): ``training_args.fpaa`` looks like a mangled ``fp16`` -- confirm.
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}""")
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s" , lowerCAmelCase_)
    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        lowerCamelCase_ : Optional[int] = load_dataset(data_args.dataset_name , data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            # No validation split: carve one out of the head of the train split.
            lowerCamelCase_ : Any = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
            lowerCamelCase_ : Optional[int] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
    else:
        # Local files: build the data_files mapping for load_dataset.
        lowerCamelCase_ : Dict = {}
        if data_args.train_file is not None:
            lowerCamelCase_ : str = data_args.train_file
        if data_args.validation_file is not None:
            lowerCamelCase_ : Any = data_args.validation_file
        lowerCamelCase_ : Any = data_args.train_file.split(".")[-1]
        if extension == "txt":
            lowerCamelCase_ : List[str] = "text"
        lowerCamelCase_ : Dict = load_dataset(lowerCAmelCase_ , data_files=lowerCAmelCase_)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowerCamelCase_ : Optional[Any] = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        lowerCamelCase_ : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , **lowerCAmelCase_)
    elif model_args.model_name_or_path:
        lowerCamelCase_ : str = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_)
    else:
        lowerCamelCase_ : Optional[int] = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
    if model_args.config_overrides is not None:
        logger.info(F"""Overriding config: {model_args.config_overrides}""")
        config.update_from_string(model_args.config_overrides)
        logger.info(F"""New config: {config}""")
    lowerCamelCase_ : List[str] = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        lowerCamelCase_ : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowerCAmelCase_)
    elif model_args.model_name_or_path:
        lowerCamelCase_ : Dict = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name.")
    if model_args.model_name_or_path:
        lowerCamelCase_ : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("Training new model from scratch")
        lowerCamelCase_ : Dict = AutoModelForMaskedLM.from_config(lowerCAmelCase_)
    # Make room for any tokens the tokenizer added beyond the config's vocab.
    model.resize_token_embeddings(len(lowerCAmelCase_))
    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        lowerCamelCase_ : Optional[Any] = datasets["train"].column_names
    else:
        lowerCamelCase_ : Dict = datasets["validation"].column_names
    lowerCamelCase_ : Union[str, Any] = "text" if "text" in column_names else column_names[0]
    lowerCamelCase_ : Optional[Any] = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(lowerCAmelCase_):
        # Remove empty lines
        lowerCamelCase_ : str = [line for line in examples["text"] if len(lowerCAmelCase_) > 0 and not line.isspace()]
        return tokenizer(examples["text"] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=data_args.max_seq_length)

    lowerCamelCase_ : str = datasets.map(
        lowerCAmelCase_ , batched=lowerCAmelCase_ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        lowerCamelCase_ : List[Any] = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        lowerCamelCase_ : List[str] = add_chinese_references(
            tokenized_datasets["validation"] , data_args.validation_ref_file)
    # If we have ref files, need to avoid it removed by trainer
    lowerCamelCase_ : Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        lowerCamelCase_ : Union[str, Any] = False
    # Data collator
    # This one will take care of randomly masking the tokens.
    lowerCamelCase_ : Optional[Any] = DataCollatorForWholeWordMask(tokenizer=lowerCAmelCase_ , mlm_probability=data_args.mlm_probability)
    # Initialize our Trainer
    lowerCamelCase_ : int = Trainer(
        model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=tokenized_datasets["train"] if training_args.do_train else None , eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , )
    # Training
    if training_args.do_train:
        # Resume priority: explicit last checkpoint, then a checkpoint-shaped
        # model_name_or_path directory, else train from the loaded weights.
        if last_checkpoint is not None:
            lowerCamelCase_ : Dict = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            lowerCamelCase_ : Dict = model_args.model_name_or_path
        else:
            lowerCamelCase_ : int = None
        lowerCamelCase_ : Optional[Any] = trainer.train(resume_from_checkpoint=lowerCAmelCase_)
        trainer.save_model() # Saves the tokenizer too for easy upload
        lowerCamelCase_ : Tuple = os.path.join(training_args.output_dir , "train_results.txt")
        if trainer.is_world_process_zero():
            with open(lowerCAmelCase_ , "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(F""" {key} = {value}""")
                    writer.write(F"""{key} = {value}\n""")
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json"))
    # Evaluation
    lowerCamelCase_ : Dict = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        lowerCamelCase_ : Tuple = trainer.evaluate()
        # Perplexity = exp(eval loss).
        lowerCamelCase_ : str = math.exp(eval_output["eval_loss"])
        lowerCamelCase_ : Tuple = perplexity
        lowerCamelCase_ : int = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(lowerCAmelCase_ , "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(F""" {key} = {value}""")
                    writer.write(F"""{key} = {value}\n""")
    return results
def __magic_name__ ( lowerCAmelCase_):
    '''Entry point for spawned workers; the positional index argument is
    accepted but unused (presumably the ``_mp_fn`` used by ``xla_spawn.py``
    -- confirm).

    NOTE(review): ``main`` is undefined as written -- the main function
    above was itself renamed to ``__magic_name__`` by obfuscation and is
    shadowed by this very definition.
    '''
    main()


if __name__ == "__main__":
    main()
| 73 | 1 |
import sys
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Any = len(lowerCAmelCase_)
lowerCamelCase_ : str = [[0 for x in range(lowerCAmelCase_)] for x in range(lowerCAmelCase_)]
lowerCamelCase_ : Dict = [[0 for x in range(lowerCAmelCase_)] for x in range(lowerCAmelCase_)]
for chain_length in range(2 , lowerCAmelCase_):
for a in range(1 , n - chain_length + 1):
lowerCamelCase_ : Optional[int] = a + chain_length - 1
lowerCamelCase_ : List[Any] = sys.maxsize
for c in range(lowerCAmelCase_ , lowerCAmelCase_):
lowerCamelCase_ : str = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
lowerCamelCase_ : Optional[int] = cost
lowerCamelCase_ : Union[str, Any] = c
return matrix, sol
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
if i == j:
print("A" + str(lowerCAmelCase_) , end=" ")
else:
print("(" , end=" ")
print_optiomal_solution(lowerCAmelCase_ , lowerCAmelCase_ , optimal_solution[i][j])
print_optiomal_solution(lowerCAmelCase_ , optimal_solution[i][j] + 1 , lowerCAmelCase_)
print(")" , end=" ")
def __magic_name__ ( ):
    '''Demo driver: compute and print the optimal multiplication order for a
    chain of six matrices with dimensions 30x35, 35x15, 15x5, 5x10, 10x20,
    20x25.

    NOTE(review): the calls to ``matrix_chain_order`` and
    ``print_optiomal_solution`` (and ``main`` in the guard below) are
    undefined as written -- obfuscation renamed the helpers above to
    ``__magic_name__`` -- and ``matrix``/``n`` are read without the
    corresponding bindings.  Restore the original names before running.
    '''
    lowerCamelCase_ : Dict = [30, 35, 15, 5, 10, 20, 25]
    lowerCamelCase_ : Dict = len(lowerCAmelCase_)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = matrix_chain_order(lowerCAmelCase_)
    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optiomal_solution(lowerCAmelCase_ , 1 , n - 1)


if __name__ == "__main__":
    main()
| 73 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class lowerCAmelCase__ :
    """Settable state carried between scheduler calls.

    NOTE(review): all three fields share the obfuscated name
    ``__UpperCAmelCase``, so only the last survives at runtime.  The
    ``state.replace(num_inference_steps=..., schedule=..., timesteps=...)``
    call in the scheduler below implies the original field names.
    """

    # setable values
    __UpperCAmelCase : Optional[int] = None
    __UpperCAmelCase : Optional[jnp.ndarray] = None
    __UpperCAmelCase : Optional[jnp.ndarray] = None # sigma(t_i)

    @classmethod
    def _UpperCamelCase ( cls ):
        # Factory: a fresh, empty state (all fields default to None).
        return cls()
@dataclass
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Output container returned by the scheduler's step methods.

    NOTE(review): the three fields share the obfuscated name
    ``__UpperCAmelCase``; the ``FlaxKarrasVeOutput(prev_sample=...,
    derivative=..., state=...)`` calls below imply the original names.
    """

    __UpperCAmelCase : jnp.ndarray
    __UpperCAmelCase : jnp.ndarray
    __UpperCAmelCase : KarrasVeSchedulerState
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase ):
    """Flax variance-expanding scheduler (Karras-style sigma schedule).

    NOTE(review): every method below was renamed to ``_UpperCamelCase`` by
    obfuscation, so later ``def``s shadow earlier ones on the class, and
    several bodies read names (``num_inference_steps``, ``state``,
    ``sigma``, ``sample``, ...) whose bindings were dropped.  Restore the
    original method names and assignment targets before use.
    """

    @property
    def _UpperCamelCase ( self ):
        # This scheduler carries explicit (functional) state.
        return True

    @register_to_config
    def __init__( self , a_ = 0.02 , a_ = 100 , a_ = 1.0_07 , a_ = 80 , a_ = 0.05 , a_ = 50 , ):
        # All hyper-parameters are captured by @register_to_config; nothing
        # else to initialise.  NOTE(review): the six parameters all share
        # the name ``a_`` -- duplicate parameter names are a SyntaxError.
        pass

    def _UpperCamelCase ( self ):
        # Build a fresh, empty scheduler state.
        return KarrasVeSchedulerState.create()

    def _UpperCamelCase ( self , a_ , a_ , a_ = () ):
        # Decreasing timestep array plus the geometric sigma schedule:
        # sigma_max^2 * (sigma_min^2 / sigma_max^2) ** (i / (N - 1)).
        lowerCamelCase_ : List[Any] = jnp.arange(0 , a_ )[::-1].copy()
        lowerCamelCase_ : List[str] = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        # NOTE(review): ``jnp.floataa`` looks like a mangled ``jnp.float32``
        # (it does not exist in jax.numpy) -- confirm.
        return state.replace(
            num_inference_steps=a_ , schedule=jnp.array(a_ , dtype=jnp.floataa ) , timesteps=a_ , )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , ):
        # Stochastic churn: bump sigma -> sigma_hat (gamma > 0 only while
        # sigma lies inside [s_min, s_max]) and add matching fresh noise.
        if self.config.s_min <= sigma <= self.config.s_max:
            lowerCamelCase_ : Union[str, Any] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
        else:
            lowerCamelCase_ : Optional[int] = 0
        # sample eps ~ N(0, S_noise^2 * I)
        lowerCamelCase_ : Union[str, Any] = random.split(a_ , num=1 )
        lowerCamelCase_ : str = self.config.s_noise * random.normal(key=a_ , shape=sample.shape )
        lowerCamelCase_ : List[str] = sigma + gamma * sigma
        lowerCamelCase_ : Tuple = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ = True , ):
        # First-order (Euler) step from sigma_hat down to sigma_prev.
        lowerCamelCase_ : List[str] = sample_hat + sigma_hat * model_output
        lowerCamelCase_ : Union[str, Any] = (sample_hat - pred_original_sample) / sigma_hat
        lowerCamelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ = True , ):
        # Second-order correction: average the two derivative estimates.
        lowerCamelCase_ : Optional[Any] = sample_prev + sigma_prev * model_output
        lowerCamelCase_ : Any = (sample_prev - pred_original_sample) / sigma_prev
        lowerCamelCase_ : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ):
        # Adding training noise is not implemented for this scheduler.
        raise NotImplementedError()
| 73 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
# NOTE(review): the module constants below were all renamed to
# ``__magic_name__`` by obfuscation; the tokenizer class later reads
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, which are undefined as written.
# Name of the on-disk SentencePiece model file.
__magic_name__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
# Hub URL of the pretrained vocab file for each checkpoint.
__magic_name__ = {
    '''vocab_file''': {
        '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
        '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
        '''moussaKam/barthez-orangesum-title''': (
            '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
        ),
    },
}
# Maximum input length per checkpoint.
__magic_name__ = {
    '''moussaKam/mbarthez''': 1_0_2_4,
    '''moussaKam/barthez''': 1_0_2_4,
    '''moussaKam/barthez-orangesum-title''': 1_0_2_4,
}
# SentencePiece's word-start marker character.
__magic_name__ = '''▁'''
class lowerCAmelCase__ ( __lowerCamelCase ):
    """SentencePiece/BPE slow tokenizer of the BARThez family.

    NOTE(review): the helper methods below were all renamed to
    ``_UpperCamelCase`` by obfuscation (later ``def``s shadow earlier ones
    on the class), several signatures repeat the name ``a_`` (a
    SyntaxError), and many bodies read names whose bindings were dropped.
    The bodies follow the standard slow-tokenizer layout -- restore the
    original names before use.
    """

    # Class-level tokenizer configuration; the right-hand names refer to the
    # module constants above (renamed by obfuscation).
    __UpperCAmelCase : str = VOCAB_FILES_NAMES
    __UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCAmelCase : List[str] = ['''input_ids''', '''attention_mask''']

    def __init__( self , a_ , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_ = None , **a_ , ):
        # Defaults match the usual (vocab_file, bos/eos/sep/cls/unk/pad/mask
        # tokens, sp_model_kwargs) signature.
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCamelCase_ : str = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
        lowerCamelCase_ : str = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , cls_token=a_ , pad_token=a_ , mask_token=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
        lowerCamelCase_ : List[Any] = vocab_file
        lowerCamelCase_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(a_ ) )
        # Fairseq-compatible special-token id table and its inverse.
        lowerCamelCase_ : Union[str, Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        lowerCamelCase_ : List[Any] = len(self.sp_model ) - 1
        lowerCamelCase_ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def _UpperCamelCase ( self , a_ , a_ = None ):
        # Build model inputs with special tokens:
        # single sequence: <s> X </s>; pair: <s> A </s></s> B </s>.
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowerCamelCase_ : Any = [self.cls_token_id]
        lowerCamelCase_ : List[str] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def _UpperCamelCase ( self , a_ , a_ = None , a_ = False ):
        # Mask with 1 for special tokens and 0 for sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
        if token_ids_a is None:
            return [1] + ([0] * len(a_ )) + [1]
        return [1] + ([0] * len(a_ )) + [1, 1] + ([0] * len(a_ )) + [1]

    def _UpperCamelCase ( self , a_ , a_ = None ):
        # Token-type ids: a list of zeros over the full special-token layout.
        lowerCamelCase_ : List[Any] = [self.sep_token_id]
        lowerCamelCase_ : Optional[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def _UpperCamelCase ( self ):
        # Vocabulary size as reported by the SentencePiece model.
        return len(self.sp_model )

    def _UpperCamelCase ( self ):
        # Full token->id mapping, including added tokens.
        lowerCamelCase_ : int = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def _UpperCamelCase ( self , a_ ):
        # Tokenize a string with SentencePiece.
        return self.sp_model.encode(a_ , out_type=a_ )

    def _UpperCamelCase ( self , a_ ):
        # token -> id, preferring the fairseq special-token table; a SP id of
        # 0 means "unknown piece".
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        lowerCamelCase_ : List[Any] = self.sp_model.PieceToId(a_ )
        return spm_id if spm_id else self.unk_token_id

    def _UpperCamelCase ( self , a_ ):
        # id -> token, preferring the fairseq special-token table.
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(a_ )

    def _UpperCamelCase ( self , a_ ):
        # Join sub-tokens back into text; special tokens are emitted verbatim
        # rather than decoded through the SentencePiece model.
        lowerCamelCase_ : Any = []
        lowerCamelCase_ : str = ""
        lowerCamelCase_ : Dict = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(a_ ) + token
                lowerCamelCase_ : Dict = True
                lowerCamelCase_ : Any = []
            else:
                current_sub_tokens.append(a_ )
                lowerCamelCase_ : Optional[int] = False
        out_string += self.sp_model.decode(a_ )
        return out_string.strip()

    def __getstate__( self ):
        # Drop the unpicklable SentencePiece processor when pickling.
        lowerCamelCase_ : Union[str, Any] = self.__dict__.copy()
        lowerCamelCase_ : Dict = None
        return state

    def __setstate__( self , a_ ):
        # Restore state and reload the SentencePiece model from disk.
        lowerCamelCase_ : Dict = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            lowerCamelCase_ : List[str] = {}
        lowerCamelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _UpperCamelCase ( self , a_ , a_ = None ):
        # Save the SentencePiece model into ``save_directory``: copy the file
        # when it exists on disk, otherwise serialise the in-memory proto.
        if not os.path.isdir(a_ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCamelCase_ : Optional[int] = os.path.join(
            a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , a_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(a_ , "wb" ) as fi:
                lowerCamelCase_ : Optional[int] = self.sp_model.serialized_model_proto()
                fi.write(a_ )
        return (out_vocab_file,)
| 73 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Make runs deterministic so the exact-value comparisons in the tests below
# are reproducible.
enable_full_determinism()
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = StableDiffusionDiffEditPipeline
__UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
__UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
__UpperCAmelCase : List[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCAmelCase : List[str] = frozenset([] )
def _UpperCamelCase ( self ):
    """Construct the tiny sub-models needed to instantiate
    StableDiffusionDiffEditPipeline and return them as the components dict.

    ``torch.manual_seed(0)`` is re-issued before each randomly initialised
    module so every component gets reproducible weights -- do not reorder
    these statements.
    """
    torch.manual_seed(0 )
    # Minimal conditional UNet (4-channel latents, 32-dim cross attention).
    lowerCamelCase_ : str = UNetaDConditionModel(
        block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a_ , )
    # Forward DDIM scheduler and its inverse counterpart (for inversion).
    lowerCamelCase_ : str = DDIMScheduler(
        beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , )
    lowerCamelCase_ : Dict = DDIMInverseScheduler(
        beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_zero=a_ , )
    torch.manual_seed(0 )
    # Tiny VAE matching the UNet's 4 latent channels.
    lowerCamelCase_ : List[Any] = AutoencoderKL(
        block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
    torch.manual_seed(0 )
    # Tiny CLIP text encoder plus its test tokenizer.
    lowerCamelCase_ : Union[str, Any] = CLIPTextConfig(
        bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
    lowerCamelCase_ : Optional[Any] = CLIPTextModel(a_ )
    lowerCamelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
    # Safety checker / feature extractor are deliberately disabled for tests.
    lowerCamelCase_ : Optional[Any] = {
        "unet": unet,
        "scheduler": scheduler,
        "inverse_scheduler": inverse_scheduler,
        "vae": vae,
        "text_encoder": text_encoder,
        "tokenizer": tokenizer,
        "safety_checker": None,
        "feature_extractor": None,
    }
    return components
def _UpperCamelCase ( self , device , seed=0 ):
    """Inputs for the main DiffEdit call: a random mask, random inverted
    latents, a seeded generator, and fixed sampling kwargs.

    Fixes: the original signature declared ``a_`` twice (a SyntaxError)
    and used one name for both the device and the seed; the two roles are
    restored as ``device`` and ``seed``.
    """
    mask = floats_tensor((1, 16, 16) , rng=random.Random(seed ) ).to(device )
    latents = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(seed ) ).to(device )
    if str(device ).startswith("mps" ):
        # On MPS the global RNG is seeded instead of a device Generator
        # (presumably torch.Generator is unsupported there -- confirm).
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=device ).manual_seed(seed )
    inputs = {
        "prompt": "a dog and a newt",
        "mask_image": mask,
        "image_latents": latents,
        "generator": generator,
        "num_inference_steps": 2,
        "inpaint_strength": 1.0,
        "guidance_scale": 6.0,
        "output_type": "numpy",
    }
    return inputs
def _UpperCamelCase ( self , device , seed=0 ):
    """Inputs for ``generate_mask``: a small RGB PIL image plus the
    source/target prompts and a seeded generator.

    Fixes: duplicate ``a_`` parameters (a SyntaxError), device/seed
    conflation, and ``np.uinta`` -> ``np.uint8`` (``np.uinta`` does not
    exist in NumPy).
    """
    image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
    # CHW tensor -> HWC array -> 8-bit RGB PIL image.
    image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
    image = Image.fromarray(np.uint8(image ) ).convert("RGB" )
    if str(device ).startswith("mps" ):
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=device ).manual_seed(seed )
    inputs = {
        "image": image,
        "source_prompt": "a cat and a frog",
        "target_prompt": "a dog and a newt",
        "generator": generator,
        "num_inference_steps": 2,
        "num_maps_per_mask": 2,
        "mask_encode_strength": 1.0,
        "guidance_scale": 6.0,
        "output_type": "numpy",
    }
    return inputs
def _UpperCamelCase ( self , device , seed=0 ):
    """Inputs for ``invert``: a small RGB PIL image plus a prompt, a seeded
    generator, and ``decode_latents=True`` so the output is an image array.

    Fixes: duplicate ``a_`` parameters (a SyntaxError), device/seed
    conflation, and ``np.uinta`` -> ``np.uint8`` (``np.uinta`` does not
    exist in NumPy).
    """
    image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
    # CHW tensor -> HWC array -> 8-bit RGB PIL image.
    image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
    image = Image.fromarray(np.uint8(image ) ).convert("RGB" )
    if str(device ).startswith("mps" ):
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=device ).manual_seed(seed )
    inputs = {
        "image": image,
        "prompt": "a cat and a frog",
        "generator": generator,
        "num_inference_steps": 2,
        "inpaint_strength": 1.0,
        "guidance_scale": 6.0,
        "decode_latents": True,
        "output_type": "numpy",
    }
    return inputs
def _UpperCamelCase ( self ):
    """Set every optional pipeline component to None, round-trip the
    pipeline through save_pretrained/from_pretrained, and check that the
    optional components stay None and the outputs match (max abs diff
    below 1e-4)."""
    if not hasattr(self.pipeline_class , "_optional_components" ):
        return
    lowerCamelCase_ : List[Any] = self.get_dummy_components()
    lowerCamelCase_ : int = self.pipeline_class(**a_ )
    pipe.to(a_ )
    pipe.set_progress_bar_config(disable=a_ )
    # set all optional components to None and update pipeline config accordingly
    for optional_component in pipe._optional_components:
        setattr(a_ , a_ , a_ )
    pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
    lowerCamelCase_ : int = self.get_dummy_inputs(a_ )
    lowerCamelCase_ : int = pipe(**a_ )[0]
    with tempfile.TemporaryDirectory() as tmpdir:
        pipe.save_pretrained(a_ )
        lowerCamelCase_ : Optional[int] = self.pipeline_class.from_pretrained(a_ )
        pipe_loaded.to(a_ )
        pipe_loaded.set_progress_bar_config(disable=a_ )
    # After reloading, optional components must still be None ...
    for optional_component in pipe._optional_components:
        self.assertTrue(
            getattr(a_ , a_ ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
    # ... and outputs must agree with the pre-save pipeline.
    lowerCamelCase_ : List[str] = self.get_dummy_inputs(a_ )
    lowerCamelCase_ : Optional[int] = pipe_loaded(**a_ )[0]
    lowerCamelCase_ : Optional[int] = np.abs(output - output_loaded ).max()
    self.assertLess(a_ , 1E-4 )
    def _UpperCamelCase ( self ):
        """Run generate_mask on CPU with dummy inputs and pin the mask shape and corner values."""
        # NOTE(review): `a_`, `mask_slice` and `expected_slice` are rename residue --
        # not bound in this scope; restore original names before use.
        lowerCamelCase_ : Optional[int] = "cpu"
        lowerCamelCase_ : int = self.get_dummy_components()
        lowerCamelCase_ : List[Any] = self.pipeline_class(**a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : Any = self.get_dummy_mask_inputs(a_ )
        lowerCamelCase_ : int = pipe.generate_mask(**a_ )
        lowerCamelCase_ : List[Any] = mask[0, -3:, -3:]
        self.assertEqual(mask.shape , (1, 16, 16) )
        # The sampled corner of the mask is expected to be all zeros.
        lowerCamelCase_ : List[str] = np.array([0] * 9 )
        lowerCamelCase_ : Optional[int] = np.abs(mask_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(a_ , 1E-3 )
        self.assertEqual(mask[0, -3, -4] , 0 )
    def _UpperCamelCase ( self ):
        """Run pipe.invert on CPU with dummy inputs and pin the output shape and a corner slice."""
        # NOTE(review): `a_`, `image` and `image_slice` are rename residue -- not bound
        # in this scope; restore original names before use.
        lowerCamelCase_ : Optional[int] = "cpu"
        lowerCamelCase_ : Union[str, Any] = self.get_dummy_components()
        lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : Dict = self.get_dummy_inversion_inputs(a_ )
        lowerCamelCase_ : Dict = pipe.invert(**a_ ).images
        lowerCamelCase_ : str = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        # Reference corner values recorded from a known-good run.
        lowerCamelCase_ : Dict = np.array(
            [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
        lowerCamelCase_ : Any = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(a_ , 1E-3 )
    def _UpperCamelCase ( self ):
        """Delegate to the shared batch-vs-single-inference check with a looser tolerance for this pipeline."""
        super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
    def _UpperCamelCase ( self ):
        """Repeat the inversion check with DPMSolverMultistep forward/inverse schedulers."""
        # NOTE(review): `a_`, `image` and `image_slice` are rename residue -- not bound
        # in this scope; restore original names before use.
        lowerCamelCase_ : List[Any] = "cpu"
        lowerCamelCase_ : int = self.get_dummy_components()
        # Swap both scheduler directions to the DPM-Solver multistep pair.
        lowerCamelCase_ : int = {"beta_start": 0.0_00_85, "beta_end": 0.0_12, "beta_schedule": "scaled_linear"}
        lowerCamelCase_ : Optional[Any] = DPMSolverMultistepScheduler(**a_ )
        lowerCamelCase_ : List[str] = DPMSolverMultistepInverseScheduler(**a_ )
        lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : int = self.get_dummy_inversion_inputs(a_ )
        lowerCamelCase_ : str = pipe.invert(**a_ ).images
        lowerCamelCase_ : int = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        # Reference corner values recorded from a known-good run.
        lowerCamelCase_ : Union[str, Any] = np.array(
            [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
        lowerCamelCase_ : str = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(a_ , 1E-3 )
@require_torch_gpu
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow GPU integration tests running StableDiffusionDiffEditPipeline end to end on real checkpoints.

    NOTE(review): `a_` appears throughout method bodies without being bound -- residue of
    a mechanical identifier rewrite; original argument values must be restored before use.
    """
    def _UpperCamelCase ( self ):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @classmethod
    def _UpperCamelCase ( cls ):
        # Download the shared fixture image once for the whole class.
        lowerCamelCase_ : Dict = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
        lowerCamelCase_ : int = raw_image.convert("RGB" ).resize((768, 768) )
        lowerCamelCase_ : List[Any] = raw_image
    def _UpperCamelCase ( self ):
        """Full DiffEdit run (mask -> invert -> edit) with DDIM schedulers, compared against a reference image."""
        lowerCamelCase_ : Dict = torch.manual_seed(0 )
        lowerCamelCase_ : Tuple = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
        lowerCamelCase_ : str = DDIMScheduler.from_config(pipe.scheduler.config )
        lowerCamelCase_ : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : str = "a bowl of fruit"
        lowerCamelCase_ : Optional[int] = "a bowl of pears"
        lowerCamelCase_ : List[Any] = pipe.generate_mask(
            image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
        lowerCamelCase_ : str = pipe.invert(
            prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ ).latents
        lowerCamelCase_ : List[str] = pipe(
            prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
        # Loose pixel-level comparison against the reference edit.
        lowerCamelCase_ : List[str] = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png" ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5E-1
    def _UpperCamelCase ( self ):
        """Same DiffEdit run but with DPM-Solver multistep forward/inverse schedulers and 25 steps."""
        lowerCamelCase_ : Optional[Any] = torch.manual_seed(0 )
        lowerCamelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
        lowerCamelCase_ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        lowerCamelCase_ : str = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : Any = "a bowl of fruit"
        lowerCamelCase_ : Dict = "a bowl of pears"
        lowerCamelCase_ : Optional[Any] = pipe.generate_mask(
            image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
        lowerCamelCase_ : str = pipe.invert(
            prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ , num_inference_steps=25 , ).latents
        lowerCamelCase_ : Any = pipe(
            prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
        lowerCamelCase_ : List[str] = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png" ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5E-1
| 73 | 1 |
from __future__ import annotations
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
if days_between_payments <= 0:
raise ValueError("days_between_payments must be > 0")
if daily_interest_rate < 0:
raise ValueError("daily_interest_rate must be >= 0")
if principal <= 0:
raise ValueError("principal must be > 0")
return principal * daily_interest_rate * days_between_payments
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
'''simple docstring'''
if number_of_compounding_periods <= 0:
raise ValueError("number_of_compounding_periods must be > 0")
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
if principal <= 0:
raise ValueError("principal must be > 0")
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
'''simple docstring'''
if number_of_years <= 0:
raise ValueError("number_of_years must be > 0")
if nominal_annual_percentage_rate < 0:
raise ValueError("nominal_annual_percentage_rate must be >= 0")
if principal <= 0:
raise ValueError("principal must be > 0")
return compound_interest(
lowerCAmelCase_ , nominal_annual_percentage_rate / 365 , number_of_years * 365)
# Run the module's doctests when executed directly as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 73 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCAmelCase__ ( unittest.TestCase ):
    """Unit tests for transformers backbone utilities: feature/index alignment, validation, and BackboneMixin state.

    NOTE(review): several lines below use `a_` without it being bound in scope, and the
    annotated tuple-unpack form `x ,y : Tuple = ...` is invalid Python -- residue of a
    mechanical identifier rewrite; restore the original literals (likely None) before use.
    """
    def _UpperCamelCase ( self ):
        """Check get_aligned_output_features_output_indices for all combinations of given/None inputs."""
        lowerCamelCase_ : int = ["a", "b", "c"]
        # Defaults to last layer if both are None
        lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , a_ , a_ )
        self.assertEqual(a_ , ["c"] )
        self.assertEqual(a_ , [2] )
        # Out indices set to match out features
        lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
        self.assertEqual(a_ , ["a", "c"] )
        self.assertEqual(a_ , [0, 2] )
        # Out features set to match out indices
        lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
        self.assertEqual(a_ , ["a", "c"] )
        self.assertEqual(a_ , [0, 2] )
        # Out features selected from negative indices
        lowerCamelCase_ ,lowerCamelCase_ : Dict = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
        self.assertEqual(a_ , ["a", "c"] )
        self.assertEqual(a_ , [-3, -1] )
    def _UpperCamelCase ( self ):
        """Check verify_out_features_out_indices rejects every malformed combination and accepts a valid one."""
        # Stage names must be set
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
        # Out features must be a list
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
        # Out features must be a subset of stage names
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
        # Out indices must be a list or tuple
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
        # Out indices must be a subset of stage names
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
        # Out features and out indices must be the same length
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
        # Out features should match out indices
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
        # Out features and out indices should be in order
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
    def _UpperCamelCase ( self ):
        """Check BackboneMixin keeps out_features and out_indices in sync when either is reassigned."""
        lowerCamelCase_ : List[Any] = BackboneMixin()
        lowerCamelCase_ : List[Any] = ["a", "b", "c"]
        lowerCamelCase_ : Optional[int] = ["a", "c"]
        lowerCamelCase_ : Dict = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ["a", "c"] )
        self.assertEqual(backbone.out_indices , [0, 2] )
        # Check out features and indices are updated correctly
        lowerCamelCase_ : Union[str, Any] = ["a", "b"]
        self.assertEqual(backbone.out_features , ["a", "b"] )
        self.assertEqual(backbone.out_indices , [0, 1] )
        lowerCamelCase_ : str = [-3, -1]
        self.assertEqual(backbone.out_features , ["a", "c"] )
        self.assertEqual(backbone.out_indices , [-3, -1] )
| 73 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger and checkpoint->config-URL map.
# NOTE(review): both were renamed to the same identifier, so the dict assignment
# shadows the logger -- restore distinct names (e.g. `logger` and
# `LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP`) before use.
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
    '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
    '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration class for the LUKE model (entity-aware transformer).

    Stores model hyperparameters: word/entity vocab sizes, hidden dimensions,
    attention layout, dropout rates, and the entity-aware attention switch.
    """
    # model_type identifier used by the transformers Auto* machinery.
    __UpperCAmelCase : List[Any] = '''luke'''
    def __init__( self , a_=5_0267 , a_=50_0000 , a_=768 , a_=256 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1E-12 , a_=True , a_=None , a_=1 , a_=0 , a_=2 , **a_ , ):
        # NOTE(review): every parameter was renamed to `a_` (duplicate names are a
        # SyntaxError); the assignment RHS names below record which parameter each
        # slot originally was -- restore them from the original source.
        super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
        lowerCamelCase_ : Tuple = vocab_size
        lowerCamelCase_ : Optional[int] = entity_vocab_size
        lowerCamelCase_ : Any = hidden_size
        lowerCamelCase_ : Dict = entity_emb_size
        lowerCamelCase_ : List[Any] = num_hidden_layers
        lowerCamelCase_ : int = num_attention_heads
        lowerCamelCase_ : Union[str, Any] = hidden_act
        lowerCamelCase_ : Tuple = intermediate_size
        lowerCamelCase_ : Optional[Any] = hidden_dropout_prob
        lowerCamelCase_ : Any = attention_probs_dropout_prob
        lowerCamelCase_ : Optional[Any] = max_position_embeddings
        lowerCamelCase_ : str = type_vocab_size
        lowerCamelCase_ : int = initializer_range
        lowerCamelCase_ : List[Any] = layer_norm_eps
        lowerCamelCase_ : Optional[int] = use_entity_aware_attention
        lowerCamelCase_ : str = classifier_dropout
| 73 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowerCAmelCase__ ( unittest.TestCase ):
    """Tests for `accelerate launch`: runs the bundled test script with default and per-config settings.

    NOTE(review): class attributes were all renamed to `__UpperCAmelCase`, yet later
    attribute lines and methods read `mod_file`, `config_folder`, `config_file`,
    `config_path`, `changed_path`, `base_cmd`, `cmd` -- rename residue; restore the
    original attribute names before use.
    """
    # Path to the accelerate test_utils package and the CLI test script inside it.
    __UpperCAmelCase : Any = inspect.getfile(accelerate.test_utils )
    __UpperCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
    __UpperCAmelCase : Tuple = ['''accelerate''', '''launch''']
    # Location of the user's default accelerate config, parked aside during the tests.
    __UpperCAmelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
    __UpperCAmelCase : int = '''default_config.yaml'''
    __UpperCAmelCase : Tuple = config_folder / config_file
    __UpperCAmelCase : int = config_folder / '''_default_config.yaml'''
    __UpperCAmelCase : int = Path('''tests/test_configs''' )
    @classmethod
    def _UpperCamelCase ( cls ):
        # Move the user's real config out of the way so tests run with defaults.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path )
    @classmethod
    def _UpperCamelCase ( cls ):
        # Restore the user's real config after the test class finishes.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path )
    def _UpperCamelCase ( self ):
        """Launch the test script with no config file, adding --multi_gpu when >1 GPU is available."""
        lowerCamelCase_ : List[Any] = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
    def _UpperCamelCase ( self ):
        """Launch the test script once per YAML config under tests/test_configs."""
        for config in sorted(self.test_config_path.glob("**/*.yaml" ) ):
            with self.subTest(config_file=a_ ):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(a_ ), self.test_file_path] , env=os.environ.copy() )
    def _UpperCamelCase ( self ):
        """Smoke-test the `accelerate test` subcommand."""
        execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy() )
class lowerCAmelCase__ ( unittest.TestCase ):
    """Tests for `accelerate tpu-config`: asserts the gcloud command assembled in --debug mode.

    Each test invokes the CLI with a different combination of config file, inline
    command(s), command file, and accelerate-install flags, and checks the echoed
    `gcloud compute tpus tpu-vm ssh ...` line contains the expected pieces.
    """
    __UpperCAmelCase : List[Any] = '''test-tpu'''
    __UpperCAmelCase : Tuple = '''us-central1-a'''
    __UpperCAmelCase : Tuple = '''ls'''
    __UpperCAmelCase : str = ['''accelerate''', '''tpu-config''']
    __UpperCAmelCase : Dict = '''cd /usr/share'''
    __UpperCAmelCase : Any = '''tests/test_samples/test_command_file.sh'''
    __UpperCAmelCase : Dict = '''Running gcloud compute tpus tpu-vm ssh'''
    def _UpperCamelCase ( self ):
        """Base case: command, zone and TPU name passed on the CLI."""
        lowerCamelCase_ : Any = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )
    def _UpperCamelCase ( self ):
        """CLI flags override values from an explicit 0.12.0 config file."""
        lowerCamelCase_ : Tuple = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )
    def _UpperCamelCase ( self ):
        """Commands baked into the latest config file are used when none are given on the CLI."""
        lowerCamelCase_ : Union[str, Any] = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=a_ )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
    def _UpperCamelCase ( self ):
        """A single --command flag replaces the config file's commands."""
        lowerCamelCase_ : Any = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )
    def _UpperCamelCase ( self ):
        """Multiple --command flags are joined with semicolons in order."""
        lowerCamelCase_ : List[Any] = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                "echo \"Hello World\"",
                "--debug",
            ] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , a_ , )
    def _UpperCamelCase ( self ):
        """--command_file expands to the commands stored in the shell script."""
        lowerCamelCase_ : List[str] = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
    def _UpperCamelCase ( self ):
        """--command_file works together with zone/name flags on an old config version."""
        lowerCamelCase_ : Dict = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
    def _UpperCamelCase ( self ):
        """--install_accelerate prepends a pip install of the latest accelerate."""
        lowerCamelCase_ : str = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
    def _UpperCamelCase ( self ):
        """--accelerate_version pins the pip install to a specific version."""
        lowerCamelCase_ : Any = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
| 73 | 1 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False):
'''simple docstring'''
if radian_mode:
return [magnitude * cos(lowerCAmelCase_), magnitude * sin(lowerCAmelCase_)]
return [magnitude * cos(radians(lowerCAmelCase_)), magnitude * sin(radians(lowerCAmelCase_))]
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10**-1):
'''simple docstring'''
lowerCamelCase_ : NDArray[floataa] = cross(lowerCAmelCase_ , lowerCAmelCase_)
lowerCamelCase_ : float = sum(lowerCAmelCase_)
return abs(lowerCAmelCase_) < eps
# Demo/self-check block: three worked statics problems verified via asserts,
# then the module's doctests.
# NOTE(review): `polar_force`, `in_static_equilibrium`, `forces` and `location`
# are not defined under those names in this module (everything was renamed to
# `__magic_name__`) -- rename residue; restore the original names before running.
if __name__ == "__main__":
    # Test to check if it works
    __magic_name__ = array(
        [
            polar_force(7_18.4, 1_8_0 - 3_0),
            polar_force(8_79.54, 4_5),
            polar_force(1_0_0, -9_0),
        ]
    )
    __magic_name__ = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    __magic_name__ = array(
        [
            polar_force(3_0 * 9.81, 1_5),
            polar_force(2_1_5, 1_8_0 - 4_5),
            polar_force(2_6_4, 9_0 - 3_0),
        ]
    )
    __magic_name__ = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    __magic_name__ = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
    __magic_name__ = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
    assert in_static_equilibrium(forces, location)
    import doctest
    doctest.testmod()
| 73 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Unconditional latent-diffusion generation pipeline: denoise random latents with a UNet, decode with a VQ-VAE.

    NOTE(review): `__call__`'s parameters were all renamed to `a_` (duplicate names are
    a SyntaxError) while the body reads `batch_size`, `latents`, `eta`, `output_type`,
    `return_dict`, etc. -- restore the original parameter names before use.
    """
    def __init__( self , a_ , a_ , a_ ):
        # Register vqvae, unet and scheduler as pipeline modules (enables save/load).
        super().__init__()
        self.register_modules(vqvae=a_ , unet=a_ , scheduler=a_ )
    @torch.no_grad()
    def __call__( self , a_ = 1 , a_ = None , a_ = 0.0 , a_ = 50 , a_ = "pil" , a_ = True , **a_ , ):
        """Sample images: draw Gaussian latents, run the scheduler's denoising loop, decode, and post-process."""
        lowerCamelCase_ : Optional[Any] = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=a_ , )
        lowerCamelCase_ : Optional[int] = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        lowerCamelCase_ : Optional[int] = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(a_ )
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        lowerCamelCase_ : Any = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        lowerCamelCase_ : Optional[int] = {}
        if accepts_eta:
            lowerCamelCase_ : Optional[int] = eta
        for t in self.progress_bar(self.scheduler.timesteps ):
            lowerCamelCase_ : Dict = self.scheduler.scale_model_input(a_ , a_ )
            # predict the noise residual
            lowerCamelCase_ : Optional[Any] = self.unet(a_ , a_ ).sample
            # compute the previous noisy sample x_t -> x_t-1
            lowerCamelCase_ : List[Any] = self.scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
        # decode the image latents with the VAE
        lowerCamelCase_ : str = self.vqvae.decode(a_ ).sample
        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL output.
        lowerCamelCase_ : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
        lowerCamelCase_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            lowerCamelCase_ : Optional[Any] = self.numpy_to_pil(a_ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=a_ )
| 73 | 1 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Unit tests for DDPMParallelScheduler: config sweeps, variance values, full denoising loops, and custom timesteps.

    NOTE(review): method bodies read names (`config`, `scheduler`, `model`, `sample`,
    `timesteps`, `pred_prev_sample`, ...) that were renamed away in assignments, and
    `a_` is used without being bound -- residue of a mechanical identifier rewrite;
    restore the originals before running.
    """
    __UpperCAmelCase : List[Any] = (DDPMParallelScheduler,)
    def _UpperCamelCase ( self , **a_ ):
        """Return the default scheduler config with keyword overrides applied."""
        lowerCamelCase_ : Dict = {
            "num_train_timesteps": 1000,
            "beta_start": 0.00_01,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**a_ )
        return config
    def _UpperCamelCase ( self ):
        """Sweep num_train_timesteps."""
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=a_ )
    def _UpperCamelCase ( self ):
        """Sweep paired beta_start/beta_end values."""
        for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=a_ , beta_end=a_ )
    def _UpperCamelCase ( self ):
        """Sweep beta schedules."""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=a_ )
    def _UpperCamelCase ( self ):
        """Sweep variance types."""
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=a_ )
    def _UpperCamelCase ( self ):
        """Sweep clip_sample on/off."""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=a_ )
    def _UpperCamelCase ( self ):
        """Sweep thresholding with each threshold/prediction-type combination."""
        self.check_over_configs(thresholding=a_ )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=a_ , prediction_type=a_ , sample_max_value=a_ , )
    def _UpperCamelCase ( self ):
        """Sweep prediction types."""
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=a_ )
    def _UpperCamelCase ( self ):
        """Sweep representative forward timesteps."""
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=a_ )
    def _UpperCamelCase ( self ):
        """Pin _get_variance at the start, middle and end of the schedule."""
        lowerCamelCase_ : List[str] = self.scheduler_classes[0]
        lowerCamelCase_ : str = self.get_scheduler_config()
        lowerCamelCase_ : str = scheduler_class(**a_ )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_09_79 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
    def _UpperCamelCase ( self ):
        """Pin batch_step_no_noise on a stacked batch of three perturbed samples."""
        lowerCamelCase_ : Dict = self.scheduler_classes[0]
        lowerCamelCase_ : List[str] = self.get_scheduler_config()
        lowerCamelCase_ : str = scheduler_class(**a_ )
        lowerCamelCase_ : List[str] = len(a_ )
        lowerCamelCase_ : int = self.dummy_model()
        lowerCamelCase_ : List[Any] = self.dummy_sample_deter
        lowerCamelCase_ : str = self.dummy_sample_deter + 0.1
        lowerCamelCase_ : Dict = self.dummy_sample_deter - 0.1
        lowerCamelCase_ : List[Any] = samplea.shape[0]
        lowerCamelCase_ : str = torch.stack([samplea, samplea, samplea] , dim=0 )
        lowerCamelCase_ : Optional[Any] = torch.arange(a_ )[0:3, None].repeat(1 , a_ )
        lowerCamelCase_ : Optional[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        lowerCamelCase_ : Optional[Any] = scheduler.batch_step_no_noise(a_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
        lowerCamelCase_ : str = torch.sum(torch.abs(a_ ) )
        lowerCamelCase_ : Tuple = torch.mean(torch.abs(a_ ) )
        assert abs(result_sum.item() - 11_53.18_33 ) < 1E-2
        assert abs(result_mean.item() - 0.50_05 ) < 1E-3
    def _UpperCamelCase ( self ):
        """Full reverse-diffusion loop with epsilon prediction; pin the final sample statistics."""
        lowerCamelCase_ : Optional[int] = self.scheduler_classes[0]
        lowerCamelCase_ : Dict = self.get_scheduler_config()
        lowerCamelCase_ : List[Any] = scheduler_class(**a_ )
        lowerCamelCase_ : Optional[int] = len(a_ )
        lowerCamelCase_ : List[Any] = self.dummy_model()
        lowerCamelCase_ : Union[str, Any] = self.dummy_sample_deter
        lowerCamelCase_ : int = torch.manual_seed(0 )
        for t in reversed(range(a_ ) ):
            # 1. predict noise residual
            lowerCamelCase_ : int = model(a_ , a_ )
            # 2. predict previous mean of sample x_t-1
            lowerCamelCase_ : Optional[int] = scheduler.step(a_ , a_ , a_ , generator=a_ ).prev_sample
            lowerCamelCase_ : Union[str, Any] = pred_prev_sample
        lowerCamelCase_ : Optional[Any] = torch.sum(torch.abs(a_ ) )
        lowerCamelCase_ : Optional[int] = torch.mean(torch.abs(a_ ) )
        assert abs(result_sum.item() - 2_58.96_06 ) < 1E-2
        assert abs(result_mean.item() - 0.33_72 ) < 1E-3
    def _UpperCamelCase ( self ):
        """Full reverse-diffusion loop with v-prediction; pin the final sample statistics."""
        lowerCamelCase_ : Tuple = self.scheduler_classes[0]
        lowerCamelCase_ : Union[str, Any] = self.get_scheduler_config(prediction_type="v_prediction" )
        lowerCamelCase_ : Dict = scheduler_class(**a_ )
        lowerCamelCase_ : Optional[int] = len(a_ )
        lowerCamelCase_ : Optional[int] = self.dummy_model()
        lowerCamelCase_ : str = self.dummy_sample_deter
        lowerCamelCase_ : int = torch.manual_seed(0 )
        for t in reversed(range(a_ ) ):
            # 1. predict noise residual
            lowerCamelCase_ : List[Any] = model(a_ , a_ )
            # 2. predict previous mean of sample x_t-1
            lowerCamelCase_ : Union[str, Any] = scheduler.step(a_ , a_ , a_ , generator=a_ ).prev_sample
            lowerCamelCase_ : Any = pred_prev_sample
        lowerCamelCase_ : List[Any] = torch.sum(torch.abs(a_ ) )
        lowerCamelCase_ : Optional[int] = torch.mean(torch.abs(a_ ) )
        assert abs(result_sum.item() - 2_02.02_96 ) < 1E-2
        assert abs(result_mean.item() - 0.26_31 ) < 1E-3
    def _UpperCamelCase ( self ):
        """Custom (descending) timesteps round-trip through previous_timestep correctly."""
        lowerCamelCase_ : Optional[Any] = self.scheduler_classes[0]
        lowerCamelCase_ : Union[str, Any] = self.get_scheduler_config()
        lowerCamelCase_ : List[Any] = scheduler_class(**a_ )
        lowerCamelCase_ : Any = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=a_ )
        lowerCamelCase_ : Dict = scheduler.timesteps
        for i, timestep in enumerate(a_ ):
            if i == len(a_ ) - 1:
                lowerCamelCase_ : Optional[int] = -1
            else:
                lowerCamelCase_ : Union[str, Any] = timesteps[i + 1]
            lowerCamelCase_ : Optional[int] = scheduler.previous_timestep(a_ )
            lowerCamelCase_ : Any = prev_t.item()
            self.assertEqual(a_ , a_ )
    def _UpperCamelCase ( self ):
        """Non-descending custom timesteps must be rejected."""
        lowerCamelCase_ : Union[str, Any] = self.scheduler_classes[0]
        lowerCamelCase_ : Union[str, Any] = self.get_scheduler_config()
        lowerCamelCase_ : List[Any] = scheduler_class(**a_ )
        lowerCamelCase_ : Optional[Any] = [100, 87, 50, 51, 0]
        with self.assertRaises(a_ , msg="`custom_timesteps` must be in descending order." ):
            scheduler.set_timesteps(timesteps=a_ )
    def _UpperCamelCase ( self ):
        """Passing both num_inference_steps and custom timesteps must be rejected."""
        lowerCamelCase_ : Optional[Any] = self.scheduler_classes[0]
        lowerCamelCase_ : Union[str, Any] = self.get_scheduler_config()
        lowerCamelCase_ : int = scheduler_class(**a_ )
        lowerCamelCase_ : Any = [100, 87, 50, 1, 0]
        lowerCamelCase_ : Any = len(a_ )
        with self.assertRaises(a_ , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
            scheduler.set_timesteps(num_inference_steps=a_ , timesteps=a_ )
    def _UpperCamelCase ( self ):
        """Timesteps at/after num_train_timesteps must be rejected."""
        lowerCamelCase_ : Any = self.scheduler_classes[0]
        lowerCamelCase_ : List[Any] = self.get_scheduler_config()
        lowerCamelCase_ : List[str] = scheduler_class(**a_ )
        lowerCamelCase_ : Any = [scheduler.config.num_train_timesteps]
        # NOTE(review): the msg below is not an f-string, so the placeholder and the
        # doubled closing brace are printed literally -- the `msg` is display-only,
        # but it should be an f-string with balanced braces.
        with self.assertRaises(
            a_ , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
            scheduler.set_timesteps(timesteps=a_ )
| 73 |
import re
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
if len(re.findall("[ATCG]" , lowerCAmelCase_)) != len(lowerCAmelCase_):
raise ValueError("Invalid Strand")
return dna.translate(dna.maketrans("ATCG" , "TAGC"))
# Run the module's doctests when executed directly as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 73 | 1 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
# Dataset/config pairs known to be mirrored on the HF GCP bucket; consumed by the
# parameterized tests below.
# NOTE(review): originally named DATASETS_ON_HF_GCP (the code below still reads that
# name) -- the assignment target is rename residue.
__magic_name__ = [
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
    {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
    {'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
    {'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
    {'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
    {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
    {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
    {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
    {'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def __magic_name__(with_config=True):
    """Build parameterized-test case dicts from DATASETS_ON_HF_GCP.

    Args:
        with_config: when True, emit one case per (dataset, config_name) pair;
            otherwise one case per distinct dataset name.

    Returns:
        A list of dicts with a "testcase_name" key, suitable for
        ``parameterized.named_parameters``.
    """
    # Fix: the parameter was renamed to `lowerCAmelCase_` while the body (and the
    # keyword caller below, `with_config=...`) read `with_config` -- restore the name.
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__lowerCamelCase ) )
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Parameterized check that each dataset's info file is downloadable from the HF GCP bucket.

    NOTE(review): the decorator references `list_datasets_on_hf_gcp_parameters` and
    `__lowerCamelCase`, which are not bound under those names here (module functions
    were renamed to `__magic_name__`, and the base class / `a_` parameters are rename
    residue) -- restore the original names before use.
    """
    # Filled in per test case by parameterized.named_parameters (dataset, config_name).
    __UpperCAmelCase : int = None
    __UpperCAmelCase : Union[str, Any] = None
    def _UpperCamelCase ( self , a_ , a_ ):
        """Resolve the builder for (dataset, config), derive its GCP info URL, and fetch it."""
        with TemporaryDirectory() as tmp_dir:
            lowerCamelCase_ : Tuple = dataset_module_factory(a_ , cache_dir=a_ )
            lowerCamelCase_ : List[str] = import_main_class(dataset_module.module_path , dataset=a_ )
            lowerCamelCase_ : DatasetBuilder = builder_cls(
                cache_dir=a_ , config_name=a_ , hash=dataset_module.hash , )
            # Mirror path layout: <base>/<relative data dir>/<dataset_info.json>.
            lowerCamelCase_ : List[Any] = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=a_ ).replace(os.sep , "/" ),
                    config.DATASET_INFO_FILENAME,
                ] )
            lowerCamelCase_ : str = cached_path(a_ , cache_dir=a_ )
            self.assertTrue(os.path.exists(a_ ) )
@pytest.mark.integration
def __magic_name__ ( lowerCAmelCase_):
    """Integration test: prepare wikipedia 20220301.frr from the HF GCS mirror and load it as a Dataset.

    NOTE(review): the parameter is pytest's `tmp_path_factory` fixture, but the body
    reads `tmp_path_factory`, `dataset_module` and `builder_instance` -- rename
    residue; restore the original names before use.
    """
    lowerCamelCase_ : Optional[Any] = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    lowerCamelCase_ : Optional[Any] = dataset_module_factory("wikipedia" , cache_dir=lowerCAmelCase_)
    lowerCamelCase_ : Any = import_main_class(dataset_module.module_path)
    lowerCamelCase_ : DatasetBuilder = builder_cls(
        cache_dir=lowerCAmelCase_ , config_name="20220301.frr" , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    lowerCamelCase_ : Any = None
    builder_instance.download_and_prepare()
    lowerCamelCase_ : Optional[Any] = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def __magic_name__ ( lowerCAmelCase_):
    """Integration test: load wikipedia 20220301.frr as a streaming IterableDatasetDict and pull one example.

    NOTE(review): the body reads `dataset_module` and `builder_instance`, which were
    renamed away in the assignments above them -- rename residue; restore before use.
    """
    lowerCamelCase_ : List[Any] = dataset_module_factory("wikipedia" , cache_dir=lowerCAmelCase_)
    lowerCamelCase_ : int = import_main_class(dataset_module.module_path , dataset=lowerCAmelCase_)
    lowerCamelCase_ : DatasetBuilder = builder_cls(
        cache_dir=lowerCAmelCase_ , config_name="20220301.frr" , hash=dataset_module.hash , )
    lowerCamelCase_ : Any = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(lowerCAmelCase_ , lowerCAmelCase_)
    assert "train" in ds
    assert isinstance(ds["train"] , lowerCAmelCase_)
    assert next(iter(ds["train"]))
| 73 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False):
'''simple docstring'''
if radian_mode:
return [magnitude * cos(lowerCAmelCase_), magnitude * sin(lowerCAmelCase_)]
return [magnitude * cos(radians(lowerCAmelCase_)), magnitude * sin(radians(lowerCAmelCase_))]
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10**-1):
'''simple docstring'''
lowerCamelCase_ : NDArray[floataa] = cross(lowerCAmelCase_ , lowerCAmelCase_)
lowerCamelCase_ : float = sum(lowerCAmelCase_)
return abs(lowerCAmelCase_) < eps
if __name__ == "__main__":
    # Test to check if it works
    # NOTE(review): these checks call ``polar_force`` / ``in_static_equilibrium``
    # and read ``forces`` / ``location``, but the definitions above were renamed
    # to ``__magic_name__`` and the assignments below bind ``__magic_name__``,
    # not those names — obfuscation artefacts; confirm intended names before
    # running as a script.
    __magic_name__ = array(
        [
            polar_force(7_18.4, 1_8_0 - 3_0),
            polar_force(8_79.54, 4_5),
            polar_force(1_0_0, -9_0),
        ]
    )
    __magic_name__ = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    __magic_name__ = array(
        [
            polar_force(3_0 * 9.81, 1_5),
            polar_force(2_1_5, 1_8_0 - 4_5),
            polar_force(2_6_4, 9_0 - 3_0),
        ]
    )
    __magic_name__ = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    __magic_name__ = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
    __magic_name__ = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
    assert in_static_equilibrium(forces, location)
    # Run the module's doctests as a final sanity check.
    import doctest
    doctest.testmod()
| 73 | 1 |
# using dfs for finding eulerian path traversal
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
lowerCamelCase_ ,lowerCamelCase_ : Any = True, True
lowerCamelCase_ : List[Any] = dfs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
return path
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = 0
lowerCamelCase_ : Optional[int] = -1
for i in range(lowerCAmelCase_):
if i not in graph.keys():
continue
if len(graph[i]) % 2 == 1:
odd_degree_nodes += 1
lowerCamelCase_ : Any = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def __magic_name__ ( graph , max_node):
    """Print whether *graph* has an Euler circuit or path and, if so, one traversal.

    Relies on the module-level helpers ``check_circuit_or_path`` and ``dfs``.
    """
    # BUG FIX: the original repeated a single parameter name (SyntaxError) and
    # bound every local to a throwaway name while reading meaningful ones.
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        # An Euler path must start at one of the two odd-degree vertices.
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)
def __magic_name__ ( ):
    """Demo driver: run the Euler check on five sample graphs."""
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    # BUG FIX: the original passed an undefined name to every call; wire each
    # sample graph through instead.
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
    # BUG FIX: the guard called the undefined name ``main``; the driver above
    # is the intended entry point.
    __magic_name__()
| 73 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Processor bundling a CLAP feature extractor with a Roberta tokenizer."""

    # Component classes resolved by the ProcessorMixin base class.
    __UpperCAmelCase : Dict = '''ClapFeatureExtractor'''
    __UpperCAmelCase : List[str] = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')

    def __init__( self , feature_extractor , tokenizer ):
        # BUG FIX: the original declared both parameters as ``a_`` — duplicate
        # argument names are a SyntaxError in Python.
        super().__init__(feature_extractor , tokenizer )

    def __call__( self , text=None , audios=None , return_tensors=None , **kwargs ):
        """Tokenize ``text`` and/or extract features from ``audios``.

        Returns the tokenizer encoding, the audio features, or — when both are
        given — the encoding with ``input_features`` merged in.

        Raises:
            ValueError: if neither ``text`` nor ``audios`` is provided.
        """
        sampling_rate = kwargs.pop("sampling_rate" , None )
        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )
        if text is not None and audios is not None:
            # BUG FIX: the audio features were bound to a throwaway local and
            # silently dropped; merge them into the returned encoding.
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )

    def _UpperCamelCase ( self , *args , **kwargs ):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def _UpperCamelCase ( self , *args , **kwargs ):
        """Forward to the tokenizer's ``decode`` (shadows the previous def, as in the original)."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def _UpperCamelCase ( self ):
        """Deduplicated union of tokenizer and feature-extractor input names."""
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 73 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __magic_name__ ( iterations):
    """Estimate pi by Monte-Carlo sampling of the unit square and print the result.

    Args:
        iterations: Number of random points to sample.
    """
    def is_in_circle(x, y) -> bool:
        # BUG FIX: the original nested def repeated one parameter name
        # (SyntaxError) and bound the distance to a throwaway local.
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations))
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(F"""The estimated value of pi is {pi_estimate}""")
    print(F"""The numpy value of pi is {pi}""")
    print(F"""The total error is {abs(pi - pi_estimate)}""")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = 1.0 , ):
'''simple docstring'''
return mean(
function_to_integrate(uniform(lowerCAmelCase_ , lowerCAmelCase_)) for _ in range(lowerCAmelCase_)) * (max_value - min_value)
def __magic_name__ ( iterations , min_value = 0.0 , max_value = 1.0):
    """Estimate the area under y=x on [min_value, max_value] and print it vs. the exact value."""
    # BUG FIX: the original repeated one parameter name three times (SyntaxError).
    def identity_function(x) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    # Exact integral of x over the interval: (b^2 - a^2) / 2.
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""")
    print(F"""Estimated value is {estimated_value}""")
    print(F"""Expected value is {expected_value}""")
    print(F"""Total error is {abs(estimated_value - expected_value)}""")
    print("******************")
def __magic_name__ ( iterations):
    """Estimate pi as the integral of sqrt(4 - x^2) on [0, 2] and print the result."""
    def function_to_integrate(x) -> float:
        return sqrt(4.0 - x * x)
    # BUG FIX: the original passed the iteration count where the integrand was
    # expected; pass the local function instead.
    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(F"""Estimated value is {estimated_value}""")
    print(F"""Expected value is {pi}""")
    print(F"""Total error is {abs(estimated_value - pi)}""")
    print("******************")
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 73 |
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowerCamelCase_ : Any = set()
# Replace all the whitespace in our sentence
lowerCamelCase_ : str = input_str.replace(" " , "")
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower())
return len(lowerCAmelCase_) == 26
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowerCamelCase_ : List[Any] = [False] * 26
for char in input_str:
if char.islower():
lowerCamelCase_ : List[Any] = True
elif char.isupper():
lowerCamelCase_ : Optional[int] = True
return all(lowerCAmelCase_)
def __magic_name__ ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()}) == 26
def __magic_name__ ( ):
    """Time the three pangram implementations with timeit and print the results."""
    from timeit import timeit
    # BUG FIX: the original passed an undefined name as ``setup``; bind the
    # setup string to the local actually used below.
    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # BUG FIX: the guard called the undefined name ``benchmark``; the def above
    # is the intended benchmark entry point.
    __magic_name__()
| 73 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
__magic_name__ = logging.get_logger(__name__)
# Map from checkpoint name to its remote config URL.
# NOTE(review): both bindings target ``__magic_name__`` (the second rebinds the
# first) — an obfuscation artefact of this dump.
__magic_name__ = {
    '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration for the CvT (Convolutional vision Transformer) model.

    Each per-stage list gives, for the three stages, the patch-embedding
    geometry, attention width/depth, and regularisation rates; the defaults
    reproduce the CvT-13 architecture.
    """

    __UpperCAmelCase : List[str] = '''cvt'''

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        **kwargs,
    ):
        # BUG FIX: the original declared every parameter as ``a_`` (duplicate
        # argument names are a SyntaxError); names are restored from the
        # attribute-assignment order below. Mutable list defaults are kept for
        # interface compatibility with existing callers.
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 73 |
# Conversion factors to joules for each supported energy unit.
# NOTE(review): numerals carry underscore mangling from an automated rewrite
# (e.g. 1_0_0_0 == 1000); values are kept exactly as-is.
__magic_name__ = {
    "joule": 1.0,
    "kilojoule": 1_0_0_0,
    "megajoule": 1_0_0_0_0_0_0,
    "gigajoule": 1_0_0_0_0_0_0_0_0_0,
    "wattsecond": 1.0,
    "watthour": 3_6_0_0,
    "kilowatthour": 3_6_0_0_0_0_0,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_1_8_6.8,
    "kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
    "electronvolt": 1.602_176_634E-19,
    "britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
    "footpound": 1.35_58_18,
}
def __magic_name__ ( from_type , to_type , value):
    """Convert *value* between energy units via the joule table ``ENERGY_CONVERSION``.

    Args:
        from_type: Source unit name (key of the table).
        to_type: Target unit name (key of the table).
        value: Quantity expressed in ``from_type`` units.

    Raises:
        ValueError: if either unit name is not in the table.
    """
    # BUG FIX: the original repeated one parameter name three times (SyntaxError)
    # and joined an undefined name in the error message; the (from, to, value)
    # order matches the conversion expression below.
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            F"""Valid values are: {', '.join(ENERGY_CONVERSION)}"""
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 73 | 1 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __magic_name__ ( tf_checkpoint_path , rembert_config_file , pytorch_dump_path):
    """Convert a TensorFlow RemBERT checkpoint into a PyTorch state dict on disk.

    Args:
        tf_checkpoint_path: Path to the TF checkpoint.
        rembert_config_file: JSON config describing the model architecture.
        pytorch_dump_path: Destination file for ``torch.save``.
    """
    # BUG FIX: the original repeated one parameter name three times (SyntaxError);
    # names mirror the CLI flags wired up under ``__main__``.
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # NOTE(review): the parser is assigned to ``__magic_name__`` but used as
    # ``parser``, and ``args`` / ``convert_rembert_tf_checkpoint_to_pytorch``
    # are likewise obfuscation-mangled names — confirm before running this CLI.
    __magic_name__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--rembert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained RemBERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    __magic_name__ = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 73 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
# Module logger.
__magic_name__ = logging.get_logger(__name__)
# NOTE(review): every constant below is bound to ``__magic_name__`` (each
# rebinding the previous) — an obfuscation artefact of this dump.
# File name of the SentencePiece vocabulary.
__magic_name__ = {'''vocab_file''': '''spiece.model'''}
# Remote locations of the pretrained vocabulary files.
__magic_name__ = {
    '''vocab_file''': {
        '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
        '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
    }
}
# No hard positional-embedding size limit for these checkpoints.
__magic_name__ = {
    '''xlnet-base-cased''': None,
    '''xlnet-large-cased''': None,
}
# Segments (not really needed)
__magic_name__ = 0
__magic_name__ = 1
__magic_name__ = 2
__magic_name__ = 3
__magic_name__ = 4
class lowerCAmelCase__ ( __lowerCamelCase ):
    """SentencePiece-based XLNet tokenizer (left-padded).

    NOTE(review): this class is machine-obfuscated — ``__init__`` and several
    methods declare every keyword parameter as ``a_`` (duplicate argument names
    are a SyntaxError), and locals are bound to ``lowerCamelCase_`` while being
    read under their original names. Comments below describe the visible intent
    only; confirm against the upstream XLNetTokenizer before executing.
    """

    # Class-level tokenizer configuration consumed by the base class.
    __UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
    __UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # XLNet pads on the left.
    __UpperCAmelCase : Optional[int] = '''left'''

    def __init__( self , a_ , a_=False , a_=True , a_=False , a_="<s>" , a_="</s>" , a_="<unk>" , a_="<sep>" , a_="<pad>" , a_="<cls>" , a_="<mask>" , a_=["<eop>", "<eod>"] , a_ = None , **a_ , ):
        """Load the SentencePiece model and configure the special tokens."""
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCamelCase_ : str = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
        lowerCamelCase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
        # XLNet reserves 3 segment/type ids (see create_token_type_ids below).
        lowerCamelCase_ : str = 3
        lowerCamelCase_ : Dict = do_lower_case
        lowerCamelCase_ : str = remove_space
        lowerCamelCase_ : Tuple = keep_accents
        lowerCamelCase_ : Dict = vocab_file
        lowerCamelCase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(a_ )

    @property
    def _UpperCamelCase ( self ):
        """Vocabulary size of the loaded SentencePiece model."""
        return len(self.sp_model )

    def _UpperCamelCase ( self ):
        """Return the token -> id mapping, including added tokens."""
        lowerCamelCase_ : List[str] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        """Drop the unpicklable SentencePiece processor before pickling."""
        lowerCamelCase_ : Any = self.__dict__.copy()
        lowerCamelCase_ : Optional[int] = None
        return state

    def __setstate__( self , a_ ):
        """Restore pickled state and re-load the SentencePiece model."""
        lowerCamelCase_ : Union[str, Any] = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            lowerCamelCase_ : int = {}
        lowerCamelCase_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _UpperCamelCase ( self , a_ ):
        """Normalise raw text (whitespace, quote style, accents, case) before tokenising."""
        if self.remove_space:
            lowerCamelCase_ : Optional[int] = " ".join(inputs.strip().split() )
        else:
            lowerCamelCase_ : str = inputs
        lowerCamelCase_ : Any = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            # NFKD then drop combining marks strips the accents.
            lowerCamelCase_ : Dict = unicodedata.normalize("NFKD" , a_ )
            lowerCamelCase_ : int = "".join([c for c in outputs if not unicodedata.combining(a_ )] )
        if self.do_lower_case:
            lowerCamelCase_ : Any = outputs.lower()
        return outputs

    def _UpperCamelCase ( self , a_ ):
        """Tokenise text into SentencePiece pieces, re-splitting trailing digit+comma pieces."""
        lowerCamelCase_ : List[Any] = self.preprocess_text(a_ )
        lowerCamelCase_ : Optional[int] = self.sp_model.encode(a_ , out_type=a_ )
        lowerCamelCase_ : List[str] = []
        for piece in pieces:
            if len(a_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                lowerCamelCase_ : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_ , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        lowerCamelCase_ : int = cur_pieces[1:]
                    else:
                        lowerCamelCase_ : Union[str, Any] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(a_ )
            else:
                new_pieces.append(a_ )
        return new_pieces

    def _UpperCamelCase ( self , a_ ):
        """Convert a token (str) to an id via the SentencePiece vocab."""
        return self.sp_model.PieceToId(a_ )

    def _UpperCamelCase ( self , a_ ):
        """Convert an id to a token (str) via the SentencePiece vocab."""
        return self.sp_model.IdToPiece(a_ )

    def _UpperCamelCase ( self , a_ ):
        """Join pieces into a string, mapping the SentencePiece underline to spaces."""
        lowerCamelCase_ : Dict = "".join(a_ ).replace(a_ , " " ).strip()
        return out_string

    def _UpperCamelCase ( self , a_ , a_ = False , a_ = None , a_ = True , **a_ , ):
        """Decode token ids to text, keeping added tokens as separate segments."""
        lowerCamelCase_ : int = kwargs.pop("use_source_tokenizer" , a_ )
        lowerCamelCase_ : List[str] = self.convert_ids_to_tokens(a_ , skip_special_tokens=a_ )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        lowerCamelCase_ : Optional[int] = []
        lowerCamelCase_ : List[str] = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(a_ ) )
                    lowerCamelCase_ : Union[str, Any] = []
                sub_texts.append(a_ )
            else:
                current_sub_text.append(a_ )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(a_ ) )
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        lowerCamelCase_ : Union[str, Any] = "".join(a_ )
        lowerCamelCase_ : Optional[Any] = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            lowerCamelCase_ : List[Any] = self.clean_up_tokenization(a_ )
            return clean_text
        else:
            return text

    def _UpperCamelCase ( self , a_ , a_ = None ):
        """Build model inputs: ``A sep cls`` or ``A sep B sep cls`` (XLNet puts CLS last)."""
        lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCamelCase_ : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def _UpperCamelCase ( self , a_ , a_ = None , a_ = False ):
        """Return a special-tokens mask (1 marks special tokens) for one or two sequences."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
        if token_ids_a is not None:
            return ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1, 1]
        return ([0] * len(a_ )) + [1, 1]

    def _UpperCamelCase ( self , a_ , a_ = None ):
        """Create token type ids; XLNet uses segment id 2 for the trailing CLS."""
        lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCamelCase_ : Union[str, Any] = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def _UpperCamelCase ( self , a_ , a_ = None ):
        """Save the SentencePiece vocabulary (copy the file or serialise the model)."""
        if not os.path.isdir(a_ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCamelCase_ : Any = os.path.join(
            a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , a_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(a_ , "wb" ) as fi:
                lowerCamelCase_ : Dict = self.sp_model.serialized_model_proto()
                fi.write(a_ )
        return (out_vocab_file,)
| 73 | 1 |
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
if any(not isinstance(lowerCAmelCase_ , lowerCAmelCase_) or x < 0 for x in sequence):
raise TypeError("Sequence must be list of non-negative integers")
for _ in range(len(lowerCAmelCase_)):
for i, (rod_upper, rod_lower) in enumerate(zip(lowerCAmelCase_ , sequence[1:])):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 73 |
def __magic_name__ ( lowerCAmelCase_ = 10 , lowerCAmelCase_ = 1000 , lowerCAmelCase_ = True):
'''simple docstring'''
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_)
and isinstance(lowerCAmelCase_ , lowerCAmelCase_)
and isinstance(lowerCAmelCase_ , lowerCAmelCase_)
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
return min_val if option else max_val
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
return int((number_a + number_a) / 2)
def __magic_name__ ( lower , higher , to_guess):
    """Binary-search demo that homes in on *to_guess* and prints each probe.

    Args:
        lower: Exclusive lower bound for the target.
        higher: Exclusive upper bound for the target.
        to_guess: The number to find; must satisfy ``lower < to_guess < higher``.

    Raises:
        ValueError: on inverted bounds or a target outside the open interval.
    """
    # BUG FIX: the original repeated one parameter name three times (SyntaxError)
    # and bound every loop variable to a throwaway name.
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")
    def answer(number) -> str:
        # Classify a probe relative to the hidden target.
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"
    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(F"""guess the number : {last_numbers[-1]}""")
    print(F"""details : {last_numbers!s}""")
def __magic_name__ ( ):
    """Prompt for bounds and a target, then run the guessing demo."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    # BUG FIX: the original forwarded an undefined name three times; pass the
    # parsed inputs instead.
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
    # BUG FIX: the guard called the undefined name ``main``; the def above is
    # the intended entry point.
    __magic_name__()
| 73 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
# Module logger.
__magic_name__ = logging.get_logger(__name__)
# NOTE(review): every constant below is bound to ``__magic_name__`` (each
# rebinding the previous) — an obfuscation artefact of this dump.
# File names the tokenizer reads/writes.
__magic_name__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
# Remote locations of the pretrained vocabulary files.
__magic_name__ = {
    '''vocab_file''': {
        '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
    },
    '''added_tokens.json''': {
        '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
    },
    '''merges_file''': {
        '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
    },
    '''tokenizer_file''': {
        '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
    },
}
# Maximum input size (positional embeddings) per checkpoint.
__magic_name__ = {
    '''RUCAIBox/mvp''': 1_0_2_4,
}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Fast (Rust-backed) MVP tokenizer wrapper.

    NOTE(review): this class is machine-obfuscated — method parameters are all
    ``a_`` (several signatures therefore repeat an argument name, a SyntaxError)
    and locals are bound to ``lowerCamelCase_`` while read under other names.
    Comments describe the visible intent only; confirm against the upstream
    MvpTokenizerFast before executing.
    """

    # Class-level tokenizer configuration consumed by the base class.
    __UpperCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
    __UpperCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCAmelCase : int = ['''input_ids''', '''attention_mask''']
    # Matching slow tokenizer class.
    __UpperCAmelCase : List[str] = MvpTokenizer

    def __init__( self , a_=None , a_=None , a_=None , a_="replace" , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=False , a_=True , **a_ , ):
        """Initialise the fast tokenizer and sync pre-tokenizer / post-processor state."""
        super().__init__(
            a_ , a_ , tokenizer_file=a_ , errors=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , add_prefix_space=a_ , trim_offsets=a_ , **a_ , )
        # Rebuild the pre-tokenizer if its add_prefix_space flag disagrees.
        lowerCamelCase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , a_ ) != add_prefix_space:
            lowerCamelCase_ : List[str] = getattr(a_ , pre_tok_state.pop("type" ) )
            lowerCamelCase_ : str = add_prefix_space
            lowerCamelCase_ : Dict = pre_tok_class(**a_ )
        lowerCamelCase_ : Dict = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        lowerCamelCase_ : int = "post_processor"
        lowerCamelCase_ : List[Any] = getattr(self.backend_tokenizer , a_ , a_ )
        if tokenizer_component_instance:
            lowerCamelCase_ : Union[str, Any] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                lowerCamelCase_ : Union[str, Any] = tuple(state["sep"] )
            if "cls" in state:
                lowerCamelCase_ : List[Any] = tuple(state["cls"] )
            lowerCamelCase_ : Any = False
            if state.get("add_prefix_space" , a_ ) != add_prefix_space:
                lowerCamelCase_ : Any = add_prefix_space
                lowerCamelCase_ : List[Any] = True
            if state.get("trim_offsets" , a_ ) != trim_offsets:
                lowerCamelCase_ : Optional[Any] = trim_offsets
                lowerCamelCase_ : List[Any] = True
            if changes_to_apply:
                # Recreate the post-processor with the corrected flags.
                lowerCamelCase_ : List[str] = getattr(a_ , state.pop("type" ) )
                lowerCamelCase_ : int = component_class(**a_ )
                setattr(self.backend_tokenizer , a_ , a_ )

    @property
    def _UpperCamelCase ( self ):
        """Mask token as a string; logs an error and returns None when unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def _UpperCamelCase ( self , a_ ):
        # NOTE(review): the decorator references ``mask_token`` although the
        # property above is named ``_UpperCamelCase`` — obfuscation artefact.
        lowerCamelCase_ : Optional[int] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else value
        lowerCamelCase_ : Optional[int] = value

    def _UpperCamelCase ( self , *a_ , **a_ ):
        """Batch-encode; pretokenized inputs require add_prefix_space=True."""
        lowerCamelCase_ : Union[str, Any] = kwargs.get("is_split_into_words" , a_ )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*a_ , **a_ )

    def _UpperCamelCase ( self , *a_ , **a_ ):
        """Encode a single example; same add_prefix_space requirement as above."""
        lowerCamelCase_ : Optional[Any] = kwargs.get("is_split_into_words" , a_ )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*a_ , **a_ )

    def _UpperCamelCase ( self , a_ , a_ = None ):
        """Save the underlying tokenizer model files into the given directory."""
        lowerCamelCase_ : int = self._tokenizer.model.save(a_ , name=a_ )
        return tuple(a_ )

    def _UpperCamelCase ( self , a_ , a_=None ):
        """Add BOS/EOS around one or two sequences."""
        lowerCamelCase_ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def _UpperCamelCase ( self , a_ , a_ = None ):
        """Token type ids are all zeros for MVP (no segment distinction)."""
        lowerCamelCase_ : Tuple = [self.sep_token_id]
        lowerCamelCase_ : List[str] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 73 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
__magic_name__ = logging.get_logger(__name__)
# Map from checkpoint name to its remote config URL.
# NOTE(review): both bindings target ``__magic_name__`` (the second rebinds the
# first) — an obfuscation artefact of this dump.
__magic_name__ = {
    '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration for the CvT (Convolutional vision Transformer) model.

    Each per-stage list gives, for the three stages, the patch-embedding
    geometry, attention width/depth, and regularisation rates; the defaults
    reproduce the CvT-13 architecture.
    """

    __UpperCAmelCase : List[str] = '''cvt'''

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        **kwargs,
    ):
        # BUG FIX: the original declared every parameter as ``a_`` (duplicate
        # argument names are a SyntaxError); names are restored from the
        # attribute-assignment order below. Mutable list defaults are kept for
        # interface compatibility with existing callers.
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 73 | 1 |
import json
import sys
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
with open(lowerCAmelCase_ , encoding="utf-8") as f:
lowerCamelCase_ : Union[str, Any] = json.load(lowerCAmelCase_)
lowerCamelCase_ : Tuple = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
for benchmark_name in sorted(lowerCAmelCase_):
lowerCamelCase_ : Tuple = results[benchmark_name]
lowerCamelCase_ : List[str] = benchmark_name.split("/")[-1]
output_md.append(F"""### Benchmark: {benchmark_file_name}""")
lowerCamelCase_ : Union[str, Any] = "| metric |"
lowerCamelCase_ : Tuple = "|--------|"
lowerCamelCase_ : Optional[int] = "| new / old (diff) |"
for metric_name in sorted(lowerCAmelCase_):
lowerCamelCase_ : List[Any] = benchmark_res[metric_name]
lowerCamelCase_ : Union[str, Any] = metric_vals["new"]
lowerCamelCase_ : int = metric_vals.get("old" , lowerCAmelCase_)
lowerCamelCase_ : Dict = metric_vals.get("diff" , lowerCAmelCase_)
lowerCamelCase_ : Tuple = F""" {new_val:f}""" if isinstance(lowerCAmelCase_ , (int, float)) else "None"
if old_val is not None:
val_str += F""" / {old_val:f}""" if isinstance(lowerCAmelCase_ , (int, float)) else "None"
if dif_val is not None:
val_str += F""" ({dif_val:f})""" if isinstance(lowerCAmelCase_ , (int, float)) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("</details>")
with open(lowerCAmelCase_ , "w" , encoding="utf-8") as f:
f.writelines("\n".join(lowerCAmelCase_))
if __name__ == "__main__":
__magic_name__ = sys.argv[1]
__magic_name__ = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 73 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Declare which submodules/symbols the lazy module exposes.
# NOTE(review): every binding below targets ``__magic_name__`` while the last
# line reads ``_import_structure`` — an obfuscation artefact; the intent is the
# usual transformers lazy-import pattern, with tokenizers added only when their
# optional dependency is installed.
__magic_name__ = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece missing: the slow tokenizer is not exported.
    pass
else:
    __magic_name__ = ['''LayoutXLMTokenizer''']
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # tokenizers missing: the fast tokenizer is not exported.
    pass
else:
    __magic_name__ = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
    # Static type-checkers see the real imports.
    from .processing_layoutxlm import LayoutXLMProcessor
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
    import sys
    # Replace this module with a lazy proxy at runtime.
    __magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 73 | 1 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def __magic_name__(image_size, device):
    """Download the BLIP demo image and preprocess it to a (1, 3, H, W) tensor.

    Args:
        image_size: target square side length for the resize.
        device: torch device the resulting tensor is moved to.

    Returns:
        A batched, normalized image tensor on *device*.

    Bug fix: both parameters were previously named ``lowerCAmelCase_`` (a
    SyntaxError) and the body read the undefined name ``image_size``; the
    ``stream`` flag of ``requests.get`` was also bound to a parameter instead
    of ``True``.
    """
    url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    # CLIP-style normalization constants used by BLIP.
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73), (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
if "visual_encoder" in key:
lowerCamelCase_ : Tuple = re.sub("visual_encoder*" , "vision_model.encoder" , lowerCAmelCase_)
if "blocks" in key:
lowerCamelCase_ : Optional[Any] = re.sub(R"blocks" , "layers" , lowerCAmelCase_)
if "attn" in key:
lowerCamelCase_ : Union[str, Any] = re.sub(R"attn" , "self_attn" , lowerCAmelCase_)
if "norm1" in key:
lowerCamelCase_ : Tuple = re.sub(R"norm1" , "layer_norm1" , lowerCAmelCase_)
if "norm2" in key:
lowerCamelCase_ : List[str] = re.sub(R"norm2" , "layer_norm2" , lowerCAmelCase_)
if "encoder.norm" in key:
lowerCamelCase_ : int = re.sub(R"encoder.norm" , "post_layernorm" , lowerCAmelCase_)
if "encoder.patch_embed.proj" in key:
lowerCamelCase_ : str = re.sub(R"encoder.patch_embed.proj" , "embeddings.patch_embedding" , lowerCAmelCase_)
if "encoder.pos_embed" in key:
lowerCamelCase_ : Tuple = re.sub(R"encoder.pos_embed" , "embeddings.position_embedding" , lowerCAmelCase_)
if "encoder.cls_token" in key:
lowerCamelCase_ : List[Any] = re.sub(R"encoder.cls_token" , "embeddings.class_embedding" , lowerCAmelCase_)
if "self_attn" in key:
lowerCamelCase_ : Any = re.sub(R"self_attn.proj" , "self_attn.projection" , lowerCAmelCase_)
return key
@torch.no_grad()
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_=None):
    '''Convert the original BLIP captioning/VQA/ITM checkpoints to HF format,
    sanity-check the converted models on the demo image, and optionally save
    them under the dump folder (with "_vqa"/"_itm" suffixes).

    NOTE(review): both parameters are named ``lowerCAmelCase_`` (a SyntaxError)
    and most assignments below bind to ``lowerCamelCase_`` while later lines
    read the original variable names (``hf_model``, ``pt_model``, ...) — this
    looks like obfuscation damage; confirm against the upstream conversion
    script before relying on it.
    '''
    if config_path is not None:
        lowerCamelCase_ : Union[str, Any] = BlipConfig.from_pretrained(lowerCAmelCase_)
    else:
        lowerCamelCase_ : Optional[int] = BlipConfig(projection_dim=512 , text_config={} , vision_config={})
    lowerCamelCase_ : Optional[Any] = BlipForConditionalGeneration(lowerCAmelCase_).eval()
    lowerCamelCase_ : Optional[Any] = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    lowerCamelCase_ : Dict = blip_decoder(pretrained=lowerCAmelCase_ , image_size=384 , vit="base")
    lowerCamelCase_ : Union[str, Any] = pt_model.eval()
    lowerCamelCase_ : str = pt_model.state_dict()
    # Re-key the original state dict into HF naming before loading.
    for key in modified_state_dict.copy():
        lowerCamelCase_ : Dict = modified_state_dict.pop(lowerCAmelCase_)
        lowerCamelCase_ : Dict = rename_key(lowerCAmelCase_)
        lowerCamelCase_ : Tuple = value
    hf_model.load_state_dict(lowerCAmelCase_)
    lowerCamelCase_ : str = 384
    lowerCamelCase_ : List[str] = load_demo_image(image_size=lowerCAmelCase_ , device="cpu")
    lowerCamelCase_ : List[Any] = BertTokenizer.from_pretrained("bert-base-uncased")
    lowerCamelCase_ : int = tokenizer(["a picture of"]).input_ids
    # Regression checks: generated token ids must match the reference run.
    lowerCamelCase_ : Optional[Any] = hf_model.generate(lowerCAmelCase_ , lowerCAmelCase_)
    assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    lowerCamelCase_ : Tuple = hf_model.generate(lowerCAmelCase_)
    assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(lowerCAmelCase_)
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    lowerCamelCase_ : int = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )
    # Same conversion dance for the VQA head.
    lowerCamelCase_ : Optional[Any] = blip_vqa(pretrained=lowerCAmelCase_ , image_size=lowerCAmelCase_ , vit="base")
    vqa_model.eval()
    lowerCamelCase_ : int = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        lowerCamelCase_ : Tuple = modified_state_dict.pop(lowerCAmelCase_)
        lowerCamelCase_ : List[Any] = rename_key(lowerCAmelCase_)
        lowerCamelCase_ : Optional[int] = value
    lowerCamelCase_ : List[Any] = BlipForQuestionAnswering(lowerCAmelCase_)
    hf_vqa_model.load_state_dict(lowerCAmelCase_)
    lowerCamelCase_ : Tuple = ["How many dogs are in this image?"]
    lowerCamelCase_ : List[str] = tokenizer(lowerCAmelCase_ , return_tensors="pt").input_ids
    lowerCamelCase_ : List[str] = hf_vqa_model.generate(lowerCAmelCase_ , lowerCAmelCase_)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")
    # And for the image-text retrieval (ITM) head.
    lowerCamelCase_ : List[Any] = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
    lowerCamelCase_ : Union[str, Any] = blip_itm(pretrained=lowerCAmelCase_ , image_size=lowerCAmelCase_ , vit="base")
    itm_model.eval()
    lowerCamelCase_ : Union[str, Any] = itm_model.state_dict()
    for key in modified_state_dict.copy():
        lowerCamelCase_ : List[Any] = modified_state_dict.pop(lowerCAmelCase_)
        lowerCamelCase_ : Optional[int] = rename_key(lowerCAmelCase_)
        lowerCamelCase_ : Dict = value
    lowerCamelCase_ : Optional[Any] = BlipForImageTextRetrieval(lowerCAmelCase_)
    lowerCamelCase_ : Optional[int] = ["A picture of a woman with a dog sitting in a beach"]
    lowerCamelCase_ : List[Any] = tokenizer(
        lowerCAmelCase_ , return_tensors="pt" , padding="max_length" , truncation=lowerCAmelCase_ , max_length=35 , ).input_ids
    hf_itm_model.load_state_dict(lowerCAmelCase_)
    hf_itm_model.eval()
    # Reference scores with and without the ITM head.
    lowerCamelCase_ : Optional[int] = hf_itm_model(lowerCAmelCase_ , lowerCAmelCase_ , use_itm_head=lowerCAmelCase_)
    lowerCamelCase_ : Dict = hf_itm_model(lowerCAmelCase_ , lowerCAmelCase_ , use_itm_head=lowerCAmelCase_)
    assert out[0].item() == 0.21_10_68_74_94_27_79_54
    assert torch.nn.functional.softmax(out_itm[0] , dim=1)[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    # CLI entry point for the BLIP -> HF checkpoint conversion.
    __magic_name__ = argparse.ArgumentParser()
    # Bug fix: --checkpoint_path was read below via args.checkpoint_path but was
    # never registered on the parser, which raised AttributeError at runtime.
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the original BLIP checkpoint.''')
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    __magic_name__ = parser.parse_args()
    convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 73 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Processor combining an Encodec feature extractor and a T5 tokenizer:
    ``__call__`` routes text to the tokenizer and audio to the feature
    extractor; batch decoding strips padding from generated audio.

    NOTE(review): many assignments below bind to ``lowerCamelCase_`` while
    later lines read the original variable names (``audio``, ``inputs``,
    ``audio_values``, ...) — obfuscation damage; confirm against the upstream
    processor before relying on runtime behavior. ``__init__`` also declares
    two parameters with the same name ``a_`` (a SyntaxError).
    """
    # Processor attribute class names used by the ProcessorMixin machinery.
    __UpperCAmelCase : Dict = '''EncodecFeatureExtractor'''
    __UpperCAmelCase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
    def __init__( self , a_ , a_ ):
        super().__init__(a_ , a_ )
        # presumably current_processor / _in_target_context_manager — TODO confirm
        lowerCamelCase_ : Optional[Any] = self.feature_extractor
        lowerCamelCase_ : Optional[int] = False
    def _UpperCamelCase ( self , a_=None , a_=None , a_=True ):
        # Delegates decoder prompt ids to the tokenizer.
        return self.tokenizer.get_decoder_prompt_ids(task=a_ , language=a_ , no_timestamps=a_ )
    def __call__( self , *a_ , **a_ ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*a_ , **a_ )
        lowerCamelCase_ : str = kwargs.pop("audio" , a_ )
        lowerCamelCase_ : List[str] = kwargs.pop("sampling_rate" , a_ )
        lowerCamelCase_ : Optional[Any] = kwargs.pop("text" , a_ )
        if len(a_ ) > 0:
            # First positional argument is treated as the audio input.
            lowerCamelCase_ : int = args[0]
            lowerCamelCase_ : str = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if text is not None:
            lowerCamelCase_ : Dict = self.tokenizer(a_ , **a_ )
        if audio is not None:
            lowerCamelCase_ : Optional[Any] = self.feature_extractor(a_ , *a_ , sampling_rate=a_ , **a_ )
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            # Merge audio features (and padding mask, if any) into the text encoding.
            lowerCamelCase_ : Dict = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                lowerCamelCase_ : int = audio_inputs["padding_mask"]
            return inputs
    def _UpperCamelCase ( self , *a_ , **a_ ):
        # Batch decode: audio tensors go through _decode_audio, everything else
        # falls back to the tokenizer.
        lowerCamelCase_ : Dict = kwargs.pop("audio" , a_ )
        lowerCamelCase_ : Optional[Any] = kwargs.pop("padding_mask" , a_ )
        if len(a_ ) > 0:
            lowerCamelCase_ : Optional[int] = args[0]
            lowerCamelCase_ : Optional[Any] = args[1:]
        if audio_values is not None:
            return self._decode_audio(a_ , padding_mask=a_ )
        else:
            return self.tokenizer.batch_decode(*a_ , **a_ )
    def _UpperCamelCase ( self , *a_ , **a_ ):
        # Single-sequence decode delegates straight to the tokenizer.
        return self.tokenizer.decode(*a_ , **a_ )
    def _UpperCamelCase ( self , a_ , a_ = None ):
        # Strip padding from generated audio using the padding mask.
        lowerCamelCase_ : Any = to_numpy(a_ )
        lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : List[str] = audio_values.shape
        if padding_mask is None:
            return list(a_ )
        lowerCamelCase_ : Tuple = to_numpy(a_ )
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        lowerCamelCase_ : List[str] = seq_len - padding_mask.shape[-1]
        lowerCamelCase_ : int = 1 - self.feature_extractor.padding_value
        lowerCamelCase_ : List[Any] = np.pad(a_ , ((0, 0), (0, difference)) , "constant" , constant_values=a_ )
        lowerCamelCase_ : str = audio_values.tolist()
        for i in range(a_ ):
            # Keep only the samples where the mask is not the padding value.
            lowerCamelCase_ : Dict = np.asarray(audio_values[i] )[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            lowerCamelCase_ : Dict = sliced_audio.reshape(a_ , -1 )
        return audio_values
| 73 | 1 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
# Regex matching stand-alone English articles, stripped during answer normalization.
__magic_name__ = re.compile(R'''\b(a|an|the)\b''', re.UNICODE)
# Parsed command-line options; populated under the __main__ guard.
__magic_name__ = None
def __magic_name__ ( ):
    '''Parse command-line options for the SQuAD v2.0 evaluation script.

    NOTE(review): the parser is bound to an obfuscated name while later lines
    read ``parser`` — renaming damage; confirm against the upstream script.
    '''
    lowerCamelCase_ : List[Any] = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file.")
    parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions.")
    parser.add_argument(
        "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout).")
    parser.add_argument(
        "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer.")
    parser.add_argument(
        "--na-prob-thresh" , "-t" , type=lowerCAmelCase_ , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
    parser.add_argument(
        "--out-image-dir" , "-p" , metavar="out_images" , default=lowerCAmelCase_ , help="Save precision-recall curves to directory.")
    parser.add_argument("--verbose" , "-v" , action="store_true")
    # With no arguments at all, print usage and exit non-zero.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Tuple = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowerCamelCase_ : Dict = bool(qa["answers"]["text"])
return qid_to_has_ans
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
def remove_articles(lowerCAmelCase_):
return ARTICLES_REGEX.sub(" " , lowerCAmelCase_)
def white_space_fix(lowerCAmelCase_):
return " ".join(text.split())
def remove_punc(lowerCAmelCase_):
lowerCamelCase_ : List[Any] = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(lowerCAmelCase_):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase_))))
def __magic_name__(lowerCAmelCase_):
    """Tokenize an answer string for F1 scoring: normalize, then split on whitespace.

    Bug fix: the emptiness guard previously tested an undefined name ``s``
    instead of the parameter.
    """
    if not lowerCAmelCase_:
        return []
    return normalize_answer(lowerCAmelCase_).split()
def __magic_name__(a_gold, a_pred):
    """Return 1 if the normalized prediction exactly matches the normalized gold
    answer, else 0.

    Bug fix: both parameters were previously named ``lowerCAmelCase_``, which is
    a SyntaxError; they are restored to distinct gold/prediction names.
    """
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def __magic_name__(a_gold, a_pred):
    """Compute token-level F1 between a gold answer and a prediction.

    Bug fix: both parameters were previously named ``lowerCAmelCase_`` (a
    SyntaxError) and every intermediate was bound to a throwaway local while
    later lines read the original names; the standard SQuAD F1 computation is
    restored.
    """
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    # Multiset intersection counts shared tokens with multiplicity.
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def __magic_name__(dataset, preds):
    """Compute raw (un-thresholded) exact-match and F1 scores per question id.

    Args:
        dataset: SQuAD-style list of articles.
        preds: dict mapping question id -> predicted answer string.

    Returns:
        ``(exact_scores, fa_scores)`` dicts keyed by question id.

    Bug fix: both parameters were previously named ``lowerCAmelCase_`` (a
    SyntaxError) and the score dicts were never populated — every result was
    bound to a throwaway local.
    """
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(F"""Missing prediction for {qid}""")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : int = {}
for qid, s in scores.items():
lowerCamelCase_ : Union[str, Any] = na_probs[qid] > na_prob_thresh
if pred_na:
lowerCamelCase_ : Any = float(not qid_to_has_ans[qid])
else:
lowerCamelCase_ : Dict = s
return new_scores
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None):
'''simple docstring'''
if not qid_list:
lowerCamelCase_ : List[Any] = len(lowerCAmelCase_)
return collections.OrderedDict(
[
("exact", 1_00.0 * sum(exact_scores.values()) / total),
("f1", 1_00.0 * sum(fa_scores.values()) / total),
("total", total),
])
else:
lowerCamelCase_ : Tuple = len(lowerCAmelCase_)
return collections.OrderedDict(
[
("exact", 1_00.0 * sum(exact_scores[k] for k in qid_list) / total),
("f1", 1_00.0 * sum(fa_scores[k] for k in qid_list) / total),
("total", total),
])
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
for k in new_eval:
lowerCamelCase_ : Tuple = new_eval[k]
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''Plot a precision-recall step curve with matplotlib and save it to disk.

    NOTE(review): all four parameters share the same name ``lowerCAmelCase_``,
    which is a SyntaxError — presumably (precisions, recalls, out_image, title)
    upstream; confirm before use.
    '''
    plt.step(lowerCAmelCase_ , lowerCAmelCase_ , color="b" , alpha=0.2 , where="post")
    plt.fill_between(lowerCAmelCase_ , lowerCAmelCase_ , step="post" , alpha=0.2 , color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(lowerCAmelCase_)
    plt.savefig(lowerCAmelCase_)
    # Clear the figure so subsequent plots start fresh.
    plt.clf()
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_: na_probs[k])
lowerCamelCase_ : List[Any] = 0.0
lowerCamelCase_ : List[Any] = 1.0
lowerCamelCase_ : Optional[Any] = 0.0
lowerCamelCase_ : Union[str, Any] = [1.0]
lowerCamelCase_ : int = [0.0]
lowerCamelCase_ : Tuple = 0.0
for i, qid in enumerate(lowerCAmelCase_):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
lowerCamelCase_ : List[Any] = true_pos / float(i + 1)
lowerCamelCase_ : List[str] = true_pos / float(lowerCAmelCase_)
if i == len(lowerCAmelCase_) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(lowerCAmelCase_)
recalls.append(lowerCAmelCase_)
if out_image:
plot_pr_curve(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
return {"ap": 1_00.0 * avg_prec}
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''Run the precision-recall analysis: save exact/F1/oracle PR curves to the
    output directory and merge their average precisions into the main eval.

    NOTE(review): all six parameters share the same name ``lowerCAmelCase_``
    (a SyntaxError) and results bind to throwaway locals — obfuscation damage;
    confirm against the upstream evaluation script.
    '''
    if out_image_dir and not os.path.exists(lowerCAmelCase_):
        os.makedirs(lowerCAmelCase_)
    # Number of answerable questions = recall denominator.
    lowerCamelCase_ : Any = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    lowerCamelCase_ : Union[str, Any] = make_precision_recall_eval(
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , out_image=os.path.join(lowerCAmelCase_ , "pr_exact.png") , title="Precision-Recall curve for Exact Match score" , )
    lowerCamelCase_ : Dict = make_precision_recall_eval(
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , out_image=os.path.join(lowerCAmelCase_ , "pr_f1.png") , title="Precision-Recall curve for F1 score" , )
    # Oracle scores: 1.0 for answerable questions, 0.0 otherwise.
    lowerCamelCase_ : Any = {k: float(lowerCAmelCase_) for k, v in qid_to_has_ans.items()}
    lowerCamelCase_ : Any = make_precision_recall_eval(
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , out_image=os.path.join(lowerCAmelCase_ , "pr_oracle.png") , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
    merge_eval(lowerCAmelCase_ , lowerCAmelCase_ , "pr_exact")
    merge_eval(lowerCAmelCase_ , lowerCAmelCase_ , "pr_f1")
    merge_eval(lowerCAmelCase_ , lowerCAmelCase_ , "pr_oracle")
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
    '''Save a histogram of no-answer probabilities for the given question ids.

    NOTE(review): all four parameters share the same name ``lowerCAmelCase_``
    (a SyntaxError) — presumably (na_probs, qid_list, image_dir, name)
    upstream; confirm before use.
    '''
    if not qid_list:
        return
    lowerCamelCase_ : Tuple = [na_probs[k] for k in qid_list]
    # Weights normalize the histogram to proportions rather than counts.
    lowerCamelCase_ : str = np.ones_like(lowerCAmelCase_) / float(len(lowerCAmelCase_))
    plt.hist(lowerCAmelCase_ , weights=lowerCAmelCase_ , bins=20 , range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(F"""Histogram of no-answer probability: {name}""")
    plt.savefig(os.path.join(lowerCAmelCase_ , F"""na_prob_hist_{name}.png"""))
    plt.clf()
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Dict = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
lowerCamelCase_ : Optional[Any] = num_no_ans
lowerCamelCase_ : Union[str, Any] = cur_score
lowerCamelCase_ : List[str] = 0.0
lowerCamelCase_ : str = sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_: na_probs[k])
for i, qid in enumerate(lowerCAmelCase_):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
lowerCamelCase_ : Tuple = scores[qid]
else:
if preds[qid]:
lowerCamelCase_ : str = -1
else:
lowerCamelCase_ : int = 0
cur_score += diff
if cur_score > best_score:
lowerCamelCase_ : int = cur_score
lowerCamelCase_ : List[Any] = na_probs[qid]
return 1_00.0 * best_score / len(lowerCAmelCase_), best_thresh
def __magic_name__(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    """Record the best achievable exact/F1 scores and their thresholds in *main_eval*.

    Bug fix: the six parameters were previously all named ``lowerCAmelCase_``
    (a SyntaxError) and the four results were bound to throwaway locals
    instead of being stored in the evaluation dict.
    """
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def __magic_name__ ( ):
    '''Evaluation entry point: load the dataset, predictions and optional
    no-answer probabilities, compute thresholded exact/F1 metrics (overall and
    per HasAns/NoAns split), optionally run the PR analysis, and write or
    print the resulting JSON.

    NOTE(review): intermediate results bind to throwaway locals while later
    lines read the original names (``preds``, ``qid_to_has_ans``, ...) —
    obfuscation damage; confirm against the upstream evaluation script.
    '''
    with open(OPTS.data_file) as f:
        lowerCamelCase_ : str = json.load(lowerCAmelCase_)
    lowerCamelCase_ : Tuple = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        lowerCamelCase_ : Optional[Any] = json.load(lowerCAmelCase_)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            lowerCamelCase_ : Optional[int] = json.load(lowerCAmelCase_)
    else:
        # No probability file: treat every question as confidently answered.
        lowerCamelCase_ : str = {k: 0.0 for k in preds}
    lowerCamelCase_ : Union[str, Any] = make_qid_to_has_ans(lowerCAmelCase_)  # maps qid to True/False
    lowerCamelCase_ : Optional[Any] = [k for k, v in qid_to_has_ans.items() if v]
    lowerCamelCase_ : Any = [k for k, v in qid_to_has_ans.items() if not v]
    lowerCamelCase_ ,lowerCamelCase_ : int = get_raw_scores(lowerCAmelCase_ , lowerCAmelCase_)
    lowerCamelCase_ : Tuple = apply_no_ans_threshold(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , OPTS.na_prob_thresh)
    lowerCamelCase_ : Optional[int] = apply_no_ans_threshold(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , OPTS.na_prob_thresh)
    lowerCamelCase_ : int = make_eval_dict(lowerCAmelCase_ , lowerCAmelCase_)
    if has_ans_qids:
        lowerCamelCase_ : List[Any] = make_eval_dict(lowerCAmelCase_ , lowerCAmelCase_ , qid_list=lowerCAmelCase_)
        merge_eval(lowerCAmelCase_ , lowerCAmelCase_ , "HasAns")
    if no_ans_qids:
        lowerCamelCase_ : Union[str, Any] = make_eval_dict(lowerCAmelCase_ , lowerCAmelCase_ , qid_list=lowerCAmelCase_)
        merge_eval(lowerCAmelCase_ , lowerCAmelCase_ , "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , OPTS.out_image_dir)
        histogram_na_prob(lowerCAmelCase_ , lowerCAmelCase_ , OPTS.out_image_dir , "hasAns")
        histogram_na_prob(lowerCAmelCase_ , lowerCAmelCase_ , OPTS.out_image_dir , "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file , "w") as f:
            json.dump(lowerCAmelCase_ , lowerCAmelCase_)
    else:
        print(json.dumps(lowerCAmelCase_ , indent=2))
if __name__ == "__main__":
    __magic_name__ = parse_args()
    if OPTS.out_image_dir:
        # Import matplotlib lazily and force the non-interactive Agg backend
        # so plots can be written without a display.
        import matplotlib
        matplotlib.use('''Agg''')
        import matplotlib.pyplot as plt
    main()
| 73 |
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
if digit_amount > 0:
return round(number - int(lowerCAmelCase_) , lowerCAmelCase_)
return number - int(lowerCAmelCase_)
if __name__ == "__main__":
    # Demo: print the isolated decimal part for a few sample inputs.
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.3_45, 1))
    print(decimal_isolate(35.3_45, 2))
    print(decimal_isolate(35.3_45, 3))
    print(decimal_isolate(-14.7_89, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.1_23, 1))
    print(decimal_isolate(-14.1_23, 2))
    print(decimal_isolate(-14.1_23, 3))
| 73 | 1 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowerCAmelCase__ ( unittest.TestCase ):
    """Pipeline tests for audio classification (waveform inputs, top_k
    selection, dataset-backed inputs, and reference score checks).

    NOTE(review): many locals bind to throwaway names while later lines read
    the original names (``audio_classifier``, ``audio``, ``output``, ...) —
    obfuscation damage; confirm against the upstream test module.
    """
    # Model mappings that parameterize which architectures get tested.
    __UpperCAmelCase : int = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    __UpperCAmelCase : Dict = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    def _UpperCamelCase ( self , a_ , a_ , a_ ):
        # Build a pipeline plus two raw-waveform examples of different lengths.
        lowerCamelCase_ : Dict = AudioClassificationPipeline(model=a_ , feature_extractor=a_ )
        # test with a raw waveform
        lowerCamelCase_ : List[str] = np.zeros((3_4000,) )
        lowerCamelCase_ : Tuple = np.zeros((1_4000,) )
        return audio_classifier, [audioa, audio]
    def _UpperCamelCase ( self , a_ , a_ ):
        lowerCamelCase_ ,lowerCamelCase_ : Tuple = examples
        lowerCamelCase_ : List[Any] = audio_classifier(a_ )
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            a_ , [
                {"score": ANY(a_ ), "label": ANY(a_ )},
                {"score": ANY(a_ ), "label": ANY(a_ )},
            ] , )
        lowerCamelCase_ : Tuple = audio_classifier(a_ , top_k=1 )
        self.assertEqual(
            a_ , [
                {"score": ANY(a_ ), "label": ANY(a_ )},
            ] , )
        self.run_torchaudio(a_ )
    @require_torchaudio
    def _UpperCamelCase ( self , a_ ):
        import datasets
        # test with a local file
        lowerCamelCase_ : Union[str, Any] = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        lowerCamelCase_ : Optional[int] = dataset[0]["audio"]["array"]
        lowerCamelCase_ : Dict = audio_classifier(a_ )
        self.assertEqual(
            a_ , [
                {"score": ANY(a_ ), "label": ANY(a_ )},
                {"score": ANY(a_ ), "label": ANY(a_ )},
            ] , )
    @require_torch
    def _UpperCamelCase ( self ):
        # Small random classifier: check top-4 scores against two accepted outputs.
        lowerCamelCase_ : List[Any] = "anton-l/wav2vec2-random-tiny-classifier"
        lowerCamelCase_ : Optional[int] = pipeline("audio-classification" , model=a_ )
        lowerCamelCase_ : Dict = np.ones((8000,) )
        lowerCamelCase_ : Tuple = audio_classifier(a_ , top_k=4 )
        lowerCamelCase_ : Union[str, Any] = [
            {"score": 0.08_42, "label": "no"},
            {"score": 0.08_38, "label": "up"},
            {"score": 0.08_37, "label": "go"},
            {"score": 0.08_34, "label": "right"},
        ]
        lowerCamelCase_ : Any = [
            {"score": 0.08_45, "label": "stop"},
            {"score": 0.08_44, "label": "on"},
            {"score": 0.08_41, "label": "right"},
            {"score": 0.08_34, "label": "left"},
        ]
        self.assertIn(nested_simplify(a_ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
        # Dict input with explicit sampling rate must behave the same.
        lowerCamelCase_ : str = {"array": np.ones((8000,) ), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        lowerCamelCase_ : Dict = audio_classifier(a_ , top_k=4 )
        self.assertIn(nested_simplify(a_ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
    @require_torch
    @slow
    def _UpperCamelCase ( self ):
        import datasets
        # Full keyword-spotting model against reference scores.
        lowerCamelCase_ : Any = "superb/wav2vec2-base-superb-ks"
        lowerCamelCase_ : List[str] = pipeline("audio-classification" , model=a_ )
        lowerCamelCase_ : int = datasets.load_dataset("anton-l/superb_dummy" , "ks" , split="test" )
        lowerCamelCase_ : Dict = np.array(dataset[3]["speech"] , dtype=np.floataa )
        lowerCamelCase_ : Any = audio_classifier(a_ , top_k=4 )
        self.assertEqual(
            nested_simplify(a_ , decimals=3 ) , [
                {"score": 0.9_81, "label": "go"},
                {"score": 0.0_07, "label": "up"},
                {"score": 0.0_06, "label": "_unknown_"},
                {"score": 0.0_01, "label": "down"},
            ] , )
    @require_tf
    @unittest.skip("Audio classification is not implemented for TF" )
    def _UpperCamelCase ( self ):
        pass
| 73 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
    """Configuration holder for LayoutLMv3 image-processing tests: stores the
    sizes/flags used to build inputs and exposes the image-processor kwargs.

    Bug fix: ``__init__`` previously declared every parameter under the same
    name ``a_`` (a SyntaxError) and bound each value to a throwaway local, so
    no attribute was ever set on the instance.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # Default target size matches the small test images.
        self.size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.apply_ocr = apply_ocr

    def _UpperCamelCase(self):
        # Kwargs dict used to instantiate the image processor under test.
        # NOTE(review): callers elsewhere invoke this as
        # `prepare_image_processor_dict()` — the obfuscated method name is kept
        # to preserve this block's interface; confirm against upstream.
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = LayoutLMvaImageProcessingTester(self )
@property
def _UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , "do_resize" ) )
self.assertTrue(hasattr(a_ , "size" ) )
self.assertTrue(hasattr(a_ , "apply_ocr" ) )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
lowerCamelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
lowerCamelCase_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , a_ )
self.assertIsInstance(encoding.boxes , a_ )
# Test batched
lowerCamelCase_ : int = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
lowerCamelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowerCamelCase_ : Any = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
lowerCamelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowerCamelCase_ : Union[str, Any] = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
    def _UpperCamelCase ( self ):
        """Integration check of the LayoutLMv3 image processor OCR path.

        Runs the Tesseract-backed OCR on a DocVQA fixture image, compares the
        recognized words/boxes against hard-coded expectations, then verifies
        the ``apply_ocr=False`` path returns pixel values of the same shape.

        NOTE(review): ``a_``, ``ds``, ``image_processing`` and ``encoding``
        are referenced below but never bound in this scope (mangled variable
        names) -- confirm against the original test before running.
        """
        # with apply_OCR = True
        lowerCamelCase_ : Any = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        lowerCamelCase_ : Optional[Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
        lowerCamelCase_ : Optional[Any] = Image.open(ds[0]["file"] ).convert("RGB" )
        lowerCamelCase_ : List[Any] = image_processing(a_ , return_tensors="pt" )
        # Default LayoutLMv3 preprocessing resizes to 224x224 RGB.
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        lowerCamelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
        lowerCamelCase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], 
        [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , a_ )
        self.assertListEqual(encoding.boxes , a_ )
        # with apply_OCR = False
        lowerCamelCase_ : List[str] = LayoutLMvaImageProcessor(apply_ocr=a_ )
        lowerCamelCase_ : List[str] = image_processing(a_ , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 73 | 1 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece's word-boundary marker character.
# NOTE(review): both module constants below are bound to the same (mangled)
# name, so the second assignment clobbers the first; later code references
# SPIECE_UNDERLINE, which is never defined in this form -- presumably these
# were SPIECE_UNDERLINE and SAMPLE_VOCAB originally; confirm.
__magic_name__ = '''▁'''
# Path to the small sentencepiece fixture model used by the tests.
__magic_name__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
    """Tokenization tests for the BigBird slow/fast sentencepiece tokenizers.

    NOTE(review): this block looks machine-mangled -- every test method is
    named ``_UpperCamelCase`` (so later definitions shadow earlier ones) and
    most locals referenced in assertions (``a_``, ``tokenizer``, ...) are
    never bound in their scope.  The comments below describe the apparent
    intent of each body only; confirm against the original test file.
    """
    # Tokenizer classes under test and capability flags consumed by the
    # shared TokenizerTesterMixin (attribute names mangled).
    __UpperCAmelCase : Optional[int] = BigBirdTokenizer
    __UpperCAmelCase : List[Any] = BigBirdTokenizerFast
    __UpperCAmelCase : Optional[int] = True
    __UpperCAmelCase : Optional[int] = True
    # setUp: build a fixture tokenizer and persist it for later reloads.
    def _UpperCamelCase ( self ):
        super().setUp()
        lowerCamelCase_ : Optional[Any] = self.tokenizer_class(a_ , keep_accents=a_ )
        tokenizer.save_pretrained(self.tmpdirname )
    # Round-trip of a single special token through id conversion.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Union[str, Any] = "<s>"
        lowerCamelCase_ : List[str] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
    # Vocabulary sanity checks: first/last entries and total size.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "[MASK]" )
        self.assertEqual(len(a_ ) , 1004 )
    # vocab_size property of the fixture model.
    def _UpperCamelCase ( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    # Slow vs fast tokenizer parity on tokenize/encode.
    def _UpperCamelCase ( self ):
        if not self.test_rust_tokenizer:
            return
        lowerCamelCase_ : int = self.get_tokenizer()
        lowerCamelCase_ : Optional[int] = self.get_rust_tokenizer()
        lowerCamelCase_ : Dict = "I was born in 92000, and this is falsé."
        lowerCamelCase_ : List[str] = tokenizer.tokenize(a_ )
        lowerCamelCase_ : Optional[Any] = rust_tokenizer.tokenize(a_ )
        self.assertListEqual(a_ , a_ )
        lowerCamelCase_ : int = tokenizer.encode(a_ , add_special_tokens=a_ )
        lowerCamelCase_ : Union[str, Any] = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
        self.assertListEqual(a_ , a_ )
        lowerCamelCase_ : int = self.get_rust_tokenizer()
        lowerCamelCase_ : Any = tokenizer.encode(a_ )
        lowerCamelCase_ : int = rust_tokenizer.encode(a_ )
        self.assertListEqual(a_ , a_ )
    # Full tokenization pipeline: tokenize, ids, and back to tokens
    # (unknown pieces map to <unk> on the way back).
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Tuple = BigBirdTokenizer(a_ , keep_accents=a_ )
        lowerCamelCase_ : Optional[int] = tokenizer.tokenize("This is a test" )
        self.assertListEqual(a_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(a_ ) , [285, 46, 10, 170, 382] , )
        lowerCamelCase_ : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            a_ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        lowerCamelCase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(a_ )
        self.assertListEqual(
            a_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        lowerCamelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(a_ )
        self.assertListEqual(
            a_ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )
    # Lazily construct the pretrained tokenizer used by the slow tests.
    @cached_property
    def _UpperCamelCase ( self ):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
    # Slow: encoding of a short string against hard-coded ids.
    @slow
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : List[str] = "Hello World!"
        lowerCamelCase_ : Tuple = [65, 1_8536, 2260, 101, 66]
        self.assertListEqual(a_ , self.big_tokenizer.encode(a_ ) )
    # Slow: encoding of a long string with unusual characters/unknown words.
    @slow
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : str = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        lowerCamelCase_ : Optional[Any] = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231
        # fmt: on
        self.assertListEqual(a_ , self.big_tokenizer.encode(a_ ) )
    # Slow: run a real forward pass through a small BigBird model to make
    # sure tokenizer outputs are accepted by the model.
    @require_torch
    @slow
    def _UpperCamelCase ( self ):
        import torch
        from transformers import BigBirdConfig, BigBirdModel
        # Build sequence
        lowerCamelCase_ : Optional[int] = list(self.big_tokenizer.get_vocab().keys() )[:10]
        lowerCamelCase_ : List[Any] = " ".join(a_ )
        lowerCamelCase_ : Tuple = self.big_tokenizer.encode_plus(a_ , return_tensors="pt" , return_token_type_ids=a_ )
        lowerCamelCase_ : str = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=a_ )
        lowerCamelCase_ : Optional[Any] = BigBirdConfig(attention_type="original_full" )
        lowerCamelCase_ : Any = BigBirdModel(a_ )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**a_ )
            model(**a_ )
    # Slow: decoding keeps special tokens in the expected positions.
    @slow
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Tuple = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
        lowerCamelCase_ : Union[str, Any] = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
    # Slow: shared integration test against a pinned checkpoint revision.
    @slow
    def _UpperCamelCase ( self ):
        # fmt: off
        lowerCamelCase_ : Tuple = {"input_ids": [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a_ , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 73 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module-level bindings below share the same (mangled)
# name, so the config-map assignment clobbers the logger created first.
__magic_name__ = logging.get_logger(__name__)
# Map from pretrained LUKE checkpoint names to their hosted config files.
__magic_name__ = {
    '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
    '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration holding the hyper-parameters of a LUKE model.

    Fixes over the previous revision: every ``__init__`` parameter was
    declared under the same duplicate name (a ``SyntaxError``), and the values
    were bound to a single throwaway local instead of being stored on the
    instance.  Parameter names below are reconstructed from the original
    defaults and the order of the attribute assignments; positional call
    order is unchanged.
    """

    # Presumably stands in for ``model_type`` on the original config class.
    __UpperCAmelCase : List[Any] = '''luke'''

    def __init__(
        self,
        vocab_size=5_0267,
        entity_vocab_size=50_0000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Store all transformer hyper-parameters on the instance.

        Special-token ids and any extra keyword arguments are handled by the
        base config class.
        """
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # LUKE-specific switch for the entity-aware self-attention mechanism.
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 73 | 1 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class lowerCAmelCase__ :
    """Helper that builds small OpenLlama configs/inputs for the model tests.

    NOTE(review): this block looks machine-mangled -- ``__init__`` declares
    every parameter under the same ``a_`` name (a ``SyntaxError``) and locals
    are collapsed to ``lowerCamelCase_``/``a_``, so most names read below are
    never bound.  Comments describe the apparent intent only.
    """
    # Constructor: record the test-harness parent plus all the miniature
    # model hyper-parameters (batch/seq sizes, hidden dims, label counts...).
    def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ):
        lowerCamelCase_ : int = parent
        lowerCamelCase_ : int = batch_size
        lowerCamelCase_ : Union[str, Any] = seq_length
        lowerCamelCase_ : List[Any] = is_training
        lowerCamelCase_ : str = use_input_mask
        lowerCamelCase_ : str = use_token_type_ids
        lowerCamelCase_ : str = use_labels
        lowerCamelCase_ : Optional[Any] = vocab_size
        lowerCamelCase_ : List[str] = hidden_size
        lowerCamelCase_ : Tuple = num_hidden_layers
        lowerCamelCase_ : str = num_attention_heads
        lowerCamelCase_ : Union[str, Any] = intermediate_size
        lowerCamelCase_ : Any = hidden_act
        lowerCamelCase_ : List[str] = hidden_dropout_prob
        lowerCamelCase_ : Optional[int] = attention_probs_dropout_prob
        lowerCamelCase_ : int = max_position_embeddings
        lowerCamelCase_ : Dict = type_vocab_size
        lowerCamelCase_ : int = type_sequence_label_size
        lowerCamelCase_ : str = initializer_range
        lowerCamelCase_ : Dict = num_labels
        lowerCamelCase_ : Dict = num_choices
        lowerCamelCase_ : int = scope
    # Build random input ids / masks / labels plus a config.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase_ : Any = None
        if self.use_input_mask:
            lowerCamelCase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase_ : Optional[int] = None
        if self.use_token_type_ids:
            lowerCamelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCamelCase_ : Tuple = None
        lowerCamelCase_ : Optional[int] = None
        lowerCamelCase_ : Optional[int] = None
        if self.use_labels:
            lowerCamelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase_ : Dict = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    # Miniature OpenLlamaConfig built from the stored hyper-parameters.
    def _UpperCamelCase ( self ):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , initializer_range=self.initializer_range , use_stable_embedding=a_ , )
    # Forward pass through the bare model; check hidden-state shape.
    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ):
        lowerCamelCase_ : int = OpenLlamaModel(config=a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : int = model(a_ , attention_mask=a_ )
        lowerCamelCase_ : Tuple = model(a_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    # Forward pass as a decoder with encoder hidden states / masks.
    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
        lowerCamelCase_ : List[Any] = True
        lowerCamelCase_ : str = OpenLlamaModel(a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Any = model(
            a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , )
        lowerCamelCase_ : str = model(
            a_ , attention_mask=a_ , encoder_hidden_states=a_ , )
        lowerCamelCase_ : int = model(a_ , attention_mask=a_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    # Causal-LM head: logits should span the full vocabulary.
    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
        lowerCamelCase_ : Tuple = OpenLlamaForCausalLM(config=a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Optional[int] = model(a_ , attention_mask=a_ , labels=a_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # Decoder with cached past-key-values must match the uncached outputs.
    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
        lowerCamelCase_ : int = True
        lowerCamelCase_ : Union[str, Any] = True
        lowerCamelCase_ : List[Any] = OpenLlamaForCausalLM(config=a_ )
        model.to(a_ )
        model.eval()
        # first forward pass
        lowerCamelCase_ : Union[str, Any] = model(
            a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , use_cache=a_ , )
        lowerCamelCase_ : Union[str, Any] = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        lowerCamelCase_ : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowerCamelCase_ : int = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        lowerCamelCase_ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
        lowerCamelCase_ : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
        lowerCamelCase_ : Any = model(
            a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , output_hidden_states=a_ , )["hidden_states"][0]
        lowerCamelCase_ : Union[str, Any] = model(
            a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , past_key_values=a_ , output_hidden_states=a_ , )["hidden_states"][0]
        # select random slice
        lowerCamelCase_ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowerCamelCase_ : int = output_from_no_past[:, -3:, random_slice_idx].detach()
        lowerCamelCase_ : Tuple = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1E-3 ) )
    # Repack prepare_config_and_inputs() into the (config, inputs_dict) shape
    # expected by the common test mixin.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : str = self.prepare_config_and_inputs()
        (
        (
        lowerCamelCase_
        ) ,(
        lowerCamelCase_
        ) ,(
        lowerCamelCase_
        ) ,(
        lowerCamelCase_
        ) ,(
        lowerCamelCase_
        ) ,(
        lowerCamelCase_
        ) ,(
        lowerCamelCase_
        ) ,
        ) : Optional[Any] = config_and_inputs
        lowerCamelCase_ : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
    """Model tests for OpenLlama (common/generation/pipeline mixins).

    NOTE(review): this block looks machine-mangled -- every test method is
    named ``_UpperCamelCase`` (later definitions shadow earlier ones) and
    locals are collapsed to ``lowerCamelCase_``/``a_``, so several names read
    below are never bound.  Comments describe the apparent intent only.
    """
    # Model classes exercised by the shared mixins (names mangled; these
    # presumably are all_model_classes / all_generative_model_classes /
    # pipeline_model_mapping / test-capability flags).
    __UpperCAmelCase : Union[str, Any] = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    __UpperCAmelCase : Dict = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    __UpperCAmelCase : Tuple = (
        {
            '''feature-extraction''': OpenLlamaModel,
            '''text-classification''': OpenLlamaForSequenceClassification,
            '''text-generation''': OpenLlamaForCausalLM,
            '''zero-shot''': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __UpperCAmelCase : Dict = False
    __UpperCAmelCase : str = False
    # setUp: model tester helper plus a config tester with a tiny hidden size.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : List[Any] = OpenLlamaModelTester(self )
        lowerCamelCase_ : List[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 )
    # Run the shared config sanity tests.
    def _UpperCamelCase ( self ):
        self.config_tester.run_common_tests()
    # Bare-model forward pass.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a_ )
    # Same check under every position-embedding variant.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowerCamelCase_ : Union[str, Any] = type
            self.model_tester.create_and_check_model(*a_ )
    # Sequence classification with class-index labels.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ ,lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase_ : Optional[int] = 3
        lowerCamelCase_ : Dict = input_dict["input_ids"]
        lowerCamelCase_ : str = input_ids.ne(1 ).to(a_ )
        lowerCamelCase_ : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowerCamelCase_ : Optional[Any] = OpenLlamaForSequenceClassification(a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Tuple = model(a_ , attention_mask=a_ , labels=a_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    # Sequence classification, explicit single-label problem type.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase_ : int = 3
        lowerCamelCase_ : List[str] = "single_label_classification"
        lowerCamelCase_ : int = input_dict["input_ids"]
        lowerCamelCase_ : Tuple = input_ids.ne(1 ).to(a_ )
        lowerCamelCase_ : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowerCamelCase_ : List[Any] = OpenLlamaForSequenceClassification(a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    # Sequence classification, multi-label problem type with float targets.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ ,lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase_ : Any = 3
        lowerCamelCase_ : List[Any] = "multi_label_classification"
        lowerCamelCase_ : List[str] = input_dict["input_ids"]
        lowerCamelCase_ : List[Any] = input_ids.ne(1 ).to(a_ )
        lowerCamelCase_ : Optional[Any] = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        lowerCamelCase_ : str = OpenLlamaForSequenceClassification(a_ )
        model.to(a_ )
        model.eval()
        lowerCamelCase_ : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" )
    def _UpperCamelCase ( self ):
        pass
    # RoPE scaling: short inputs match only under dynamic scaling; long
    # inputs must always differ from the unscaled model.
    @parameterized.expand([("linear",), ("dynamic",)] )
    def _UpperCamelCase ( self , a_ ):
        lowerCamelCase_ ,lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase_ : Dict = ids_tensor([1, 10] , config.vocab_size )
        lowerCamelCase_ : Optional[int] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowerCamelCase_ : Union[str, Any] = OpenLlamaModel(a_ )
        original_model.to(a_ )
        original_model.eval()
        lowerCamelCase_ : Union[str, Any] = original_model(a_ ).last_hidden_state
        lowerCamelCase_ : str = original_model(a_ ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowerCamelCase_ : str = {"type": scaling_type, "factor": 10.0}
        lowerCamelCase_ : Optional[Any] = OpenLlamaModel(a_ )
        scaled_model.to(a_ )
        scaled_model.eval()
        lowerCamelCase_ : Tuple = scaled_model(a_ ).last_hidden_state
        lowerCamelCase_ : List[Any] = scaled_model(a_ ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(a_ , a_ , atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(a_ , a_ , atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(a_ , a_ , atol=1E-5 ) )
| 73 |
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
# Module logger (name mangled; the same name is re-bound further below).
__magic_name__ = datasets.utils.logging.get_logger(__name__)
# pyspark is only needed for type annotations; avoid a hard dependency.
if TYPE_CHECKING:
    import pyspark
@dataclass
class lowerCAmelCase__ ( datasets.BuilderConfig ):
    """BuilderConfig for datasets built from a Spark DataFrame."""
    # Optional explicit feature schema; when None the schema is inferred.
    __UpperCAmelCase : Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order):
    """Build a generator function that yields Spark rows as dataset examples.

    Renamed from the mangled ``__magic_name__`` so that the existing call site
    in ``SparkExamplesIterable.__init__`` (which calls
    ``_generate_iterable_examples``) resolves; the previous definition also
    declared both parameters under the same name, which is a ``SyntaxError``,
    and referenced locals that were never bound.

    Args:
        df: a ``pyspark.sql.DataFrame`` to read examples from.
        partition_order: iterable of Spark partition ids giving the order in
            which partitions are visited.

    Returns:
        A zero-argument generator function yielding ``(key, example_dict)``
        pairs, with keys of the form ``"<partition_id>_<row_id>"``.
    """
    import pyspark

    def generate_fn():
        # Tag every row with the id of the Spark partition holding it so we
        # can filter one partition at a time below.
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            # Collect a single partition's rows on the driver, dropping the
            # helper column before converting each row to a plain dict.
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            for row_id, row in enumerate(partition_df.collect()):
                yield f"{partition_id}_{row_id}", row.asDict()

    return generate_fn
class lowerCAmelCase__ ( _BaseExamplesIterable ):
    """Examples iterable that streams a Spark DataFrame partition by partition.

    NOTE(review): this block looks machine-mangled -- ``__init__`` and the
    sharding method declare duplicate ``a_`` parameters (a ``SyntaxError``),
    the three ``_UpperCamelCase`` definitions shadow one another, and names
    like ``df``, ``partition_order`` and ``generator`` are read without ever
    being bound.  Comments describe the apparent intent only.
    """
    # Store the dataframe, the partition visit order (default: natural
    # order) and a prepared example-generator function.
    def __init__( self , a_ , a_=None , ):
        lowerCamelCase_ : Dict = df
        lowerCamelCase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
        lowerCamelCase_ : int = _generate_iterable_examples(self.df , self.partition_order )
    def __iter__( self ):
        # Delegate to the generator function built in __init__.
        yield from self.generate_examples_fn()
    # Apparently shuffle_data_sources: revisit partitions in shuffled order.
    def _UpperCamelCase ( self , a_ ):
        lowerCamelCase_ : Optional[Any] = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(a_ )
        return SparkExamplesIterable(self.df , partition_order=a_ )
    # Apparently shard_data_sources: keep only this worker's partitions.
    def _UpperCamelCase ( self , a_ , a_ ):
        lowerCamelCase_ : Dict = self.split_shard_indices_by_worker(a_ , a_ )
        return SparkExamplesIterable(self.df , partition_order=a_ )
    @property
    def _UpperCamelCase ( self ):
        # Apparently n_shards: one shard per partition in the visit order.
        return len(self.partition_order )
class lowerCAmelCase__ ( datasets.DatasetBuilder ):
"""simple docstring"""
__UpperCAmelCase : Any = SparkConfig
    def __init__( self , a_ , a_ = None , a_ = None , **a_ , ):
        """Capture the Spark session, source dataframe and working directory.

        The builder's config name is derived from the dataframe's semantic
        hash so identical queries can share a cache entry.

        NOTE(review): the parameter list declares duplicate ``a_`` names (a
        ``SyntaxError``) and the body reads ``df``/``working_dir`` which are
        never bound -- presumably the originals were ``df``, ``cache_dir``
        and ``working_dir``; confirm.
        """
        import pyspark
        # Reuse (or lazily create) the active Spark session.
        lowerCamelCase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
        lowerCamelCase_ : Optional[Any] = df
        lowerCamelCase_ : List[Any] = working_dir
        super().__init__(
            cache_dir=a_ , config_name=str(self.df.semanticHash() ) , **a_ , )
    def _UpperCamelCase ( self ):
        """Validate that the cache directory is reachable from Spark workers.

        On a non-local master, a probe file is written into ``_cache_dir``
        from a worker task; if the driver cannot see the probe afterwards,
        the cache dir is not on a shared filesystem and a ValueError is
        raised.

        NOTE(review): ``uuid.uuida()`` and ``probe_file`` below look like
        mangled ``uuid.uuid4()`` / local names -- confirm before running.
        """
        # Returns the path of the created file.
        def create_cache_and_write_probe(a_ ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=a_ )
            lowerCamelCase_ : Optional[Any] = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(a_ , "a" )
            return [probe_file]
        # A local master runs everything on one machine; nothing to check.
        if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            lowerCamelCase_ : List[str] = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(a_ ).collect()
            )
            if os.path.isfile(probe[0] ):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
    def _UpperCamelCase ( self ):
        # Dataset metadata: only the (optional) user-supplied feature schema.
        return datasets.DatasetInfo(features=self.config.features )
    def _UpperCamelCase ( self , a_ ):
        # A Spark-backed dataset exposes a single TRAIN split.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
    def _UpperCamelCase ( self , a_ ):
        """Repartition the dataframe so each partition fits the shard limit.

        Per-row Arrow size is estimated by averaging over a sample of at most
        100 rows, then the partition count is chosen so that no partition
        should exceed ``max_shard_size``.

        NOTE(review): ``df_num_rows``, ``sample_num_rows``, ``it``,
        ``approx_bytes_per_row``, ``approx_total_size``, ``max_shard_size``
        and ``a_`` below are never bound in this scope (mangled names) --
        confirm against the original implementation.
        """
        import pyspark
        def get_arrow_batch_size(a_ ):
            # Map each Arrow batch to its serialized size in bytes.
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
        lowerCamelCase_ : str = self.df.count()
        lowerCamelCase_ : List[Any] = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        lowerCamelCase_ : Any = (
            self.df.limit(a_ )
            .repartition(1 )
            .mapInArrow(a_ , "batch_bytes: long" )
            .agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        lowerCamelCase_ : int = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            lowerCamelCase_ : Union[str, Any] = min(a_ , int(approx_total_size / max_shard_size ) )
            lowerCamelCase_ : int = self.df.repartition(a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ , ):
import pyspark
lowerCamelCase_ : str = ParquetWriter if file_format == "parquet" else ArrowWriter
lowerCamelCase_ : int = os.path.join(self._working_dir , os.path.basename(a_ ) ) if self._working_dir else fpath
lowerCamelCase_ : Optional[Any] = file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowerCamelCase_ : int = self.config.features
lowerCamelCase_ : Any = self._writer_batch_size
lowerCamelCase_ : Tuple = self._fs.storage_options
def write_arrow(a_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowerCamelCase_ : List[Any] = pyspark.TaskContext().taskAttemptId()
lowerCamelCase_ : Optional[int] = next(a_ , a_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
lowerCamelCase_ : List[Any] = 0
lowerCamelCase_ : Optional[int] = writer_class(
features=a_ , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
lowerCamelCase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(a_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowerCamelCase_ ,lowerCamelCase_ : List[str] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
lowerCamelCase_ : List[str] = writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
lowerCamelCase_ : Optional[int] = pa.Table.from_batches([batch] )
writer.write_table(a_ )
if writer._num_bytes > 0:
lowerCamelCase_ ,lowerCamelCase_ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(a_ ) ):
lowerCamelCase_ : str = os.path.join(os.path.dirname(a_ ) , os.path.basename(a_ ) )
shutil.move(a_ , a_ )
lowerCamelCase_ : int = (
self.df.mapInArrow(a_ , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _UpperCamelCase ( self , a_ , a_ = "arrow" , a_ = None , a_ = None , **a_ , ):
self._validate_cache_dir()
lowerCamelCase_ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(a_ )
lowerCamelCase_ : Dict = not is_remote_filesystem(self._fs )
lowerCamelCase_ : List[str] = os.path.join if is_local else posixpath.join
lowerCamelCase_ : Any = "-TTTTT-SSSSS-of-NNNNN"
lowerCamelCase_ : List[Any] = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
lowerCamelCase_ : int = path_join(self._output_dir , a_ )
lowerCamelCase_ : int = 0
lowerCamelCase_ : Optional[Any] = 0
lowerCamelCase_ : int = 0
lowerCamelCase_ : Dict = []
lowerCamelCase_ : Any = []
for task_id, content in self._prepare_split_single(a_ , a_ , a_ ):
(
(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,(
lowerCamelCase_
) ,
) : Tuple = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(a_ )
lowerCamelCase_ : Dict = total_num_examples
lowerCamelCase_ : Any = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
lowerCamelCase_ : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowerCamelCase_ : Any = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
a_ , a_ , a_ , ):
rename(
a_ , fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace("TTTTT-SSSSS" , F"""{global_shard_id:05d}""" ).replace("NNNNN" , F"""{total_shards:05d}""" ) , )
lowerCamelCase_ : Optional[int] = []
lowerCamelCase_ : Dict = 0
for i in range(len(a_ ) ):
lowerCamelCase_ ,lowerCamelCase_ : Tuple = task_id_and_num_shards[i]
for shard_id in range(a_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(a_ , len(a_ ) ).map(lambda a_ : _rename_shard(*a_ ) ).collect()
else:
# don't use any pattern
lowerCamelCase_ : int = 0
lowerCamelCase_ : Optional[int] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace(a_ , "" ) , )
def _UpperCamelCase ( self , a_ , ):
return SparkExamplesIterable(self.df )
| 73 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Lazy-import structure: the tokenizer is only registered when sentencepiece is installed.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the heavy tokenizer import only
    # happens on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 73 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    """Relax all edges leaving ``v`` for one direction of bidirectional Dijkstra.

    Updates ``cst_fwd`` and ``parent`` in place, pushes improved nodes onto
    ``queue``, and returns the (possibly improved) best known combined
    forward+backward distance.
    """
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            # The two frontiers meet at nxt: try to improve the global shortest path.
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def __magic_name__(source, destination, graph_forward, graph_backward):
    """Bidirectional Dijkstra: shortest distance from ``source`` to ``destination``.

    ``graph_forward`` is the adjacency list of the graph, ``graph_backward`` the
    adjacency list of its reverse. Returns -1 when no path exists.
    """
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        # Expand the cheapest node on each frontier, then relax both directions.
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )
        # Standard termination: once the frontiers' costs cover the best meeting point, stop.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Forward adjacency list: graph_fwd[u] = [[neighbor, edge_weight], ...]
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
# Reverse adjacency list of the same graph, used by the backward search.
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 73 | 1 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class lowerCAmelCase__(unittest.TestCase):
    """Integration tests for ``FlaxAutoModel`` loading, jitted inference, and error messages."""

    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)
                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                # NOTE(review): the upstream test also checks isinstance(config, RobertaConfig);
                # RobertaConfig is not imported in this file, so only non-None is asserted here.
                self.assertIsNotNone(config)
                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
| 73 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of canonical checkpoint names to their hosted config files.
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class lowerCAmelCase__(PretrainedConfig):
    """Configuration class for the Salesforce CTRL model.

    Stores the hyperparameters used to instantiate a CTRL model; unknown keyword
    arguments are forwarded to ``PretrainedConfig``.
    """

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=24_6534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 73 | 1 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """End-to-end tests for the ``accelerate launch`` CLI entry point."""

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        # Park any existing user config so the tests run against a clean slate.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        # Restore the user's original config.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        # Every checked-in config file must still be launchable.
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    """Tests for the ``accelerate tpu-config`` command builder.

    All commands run with ``--debug`` so only the composed gcloud command line is
    printed and asserted on; no real TPU is contacted.
    """

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                "echo \"Hello World\"",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all",
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )
| 73 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)

# All config classes that have a masked-LM head, and their model-type strings
# (used to document the --model_type choices below).
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        # --config_overrides only makes sense for a from-scratch config.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        # Validate supplied file extensions early, before any expensive loading.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    """Attach whole-word-masking reference data from ``ref_file`` as a ``chinese_ref`` column.

    ``ref_file`` contains one JSON value per non-blank line, aligned 1:1 with the
    rows of ``dataset``; returns a new ``Dataset`` with the extra column.
    """
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def __magic_name__ ( ):
'''simple docstring'''
lowerCamelCase_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : str = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowerCamelCase_ : List[str] = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ : Dict = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome.")
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}""")
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , lowerCAmelCase_)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ : Optional[int] = load_dataset(data_args.dataset_name , data_args.dataset_config_name)
if "validation" not in datasets.keys():
lowerCamelCase_ : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
lowerCamelCase_ : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
lowerCamelCase_ : Dict = {}
if data_args.train_file is not None:
lowerCamelCase_ : str = data_args.train_file
if data_args.validation_file is not None:
lowerCamelCase_ : Any = data_args.validation_file
lowerCamelCase_ : Any = data_args.train_file.split(".")[-1]
if extension == "txt":
lowerCamelCase_ : List[str] = "text"
lowerCamelCase_ : Dict = load_dataset(lowerCAmelCase_ , data_files=lowerCAmelCase_)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ : Optional[Any] = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowerCamelCase_ : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , **lowerCAmelCase_)
elif model_args.model_name_or_path:
lowerCamelCase_ : str = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_)
else:
lowerCamelCase_ : Optional[int] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""")
config.update_from_string(model_args.config_overrides)
logger.info(F"""New config: {config}""")
lowerCamelCase_ : List[str] = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
lowerCamelCase_ : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowerCAmelCase_)
elif model_args.model_name_or_path:
lowerCamelCase_ : Dict = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name.")
if model_args.model_name_or_path:
lowerCamelCase_ : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch")
lowerCamelCase_ : Dict = AutoModelForMaskedLM.from_config(lowerCAmelCase_)
model.resize_token_embeddings(len(lowerCAmelCase_))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
lowerCamelCase_ : Optional[Any] = datasets["train"].column_names
else:
lowerCamelCase_ : Dict = datasets["validation"].column_names
lowerCamelCase_ : Union[str, Any] = "text" if "text" in column_names else column_names[0]
lowerCamelCase_ : Optional[Any] = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(lowerCAmelCase_):
# Remove empty lines
lowerCamelCase_ : str = [line for line in examples["text"] if len(lowerCAmelCase_) > 0 and not line.isspace()]
return tokenizer(examples["text"] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=data_args.max_seq_length)
lowerCamelCase_ : str = datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
lowerCamelCase_ : List[Any] = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file)
if data_args.validation_ref_file is not None:
lowerCamelCase_ : List[str] = add_chinese_references(
tokenized_datasets["validation"] , data_args.validation_ref_file)
# If we have ref files, need to avoid it removed by trainer
lowerCamelCase_ : Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
lowerCamelCase_ : Union[str, Any] = False
# Data collator
# This one will take care of randomly masking the tokens.
lowerCamelCase_ : Optional[Any] = DataCollatorForWholeWordMask(tokenizer=lowerCAmelCase_ , mlm_probability=data_args.mlm_probability)
# Initialize our Trainer
lowerCamelCase_ : int = Trainer(
model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=tokenized_datasets["train"] if training_args.do_train else None , eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowerCamelCase_ : Dict = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
lowerCamelCase_ : Dict = model_args.model_name_or_path
else:
lowerCamelCase_ : int = None
lowerCamelCase_ : Optional[Any] = trainer.train(resume_from_checkpoint=lowerCAmelCase_)
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCamelCase_ : Tuple = os.path.join(training_args.output_dir , "train_results.txt")
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , "w") as writer:
logger.info("***** Train results *****")
for key, value in sorted(train_result.metrics.items()):
logger.info(F""" {key} = {value}""")
writer.write(F"""{key} = {value}\n""")
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json"))
# Evaluation
lowerCamelCase_ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
lowerCamelCase_ : Tuple = trainer.evaluate()
lowerCamelCase_ : str = math.exp(eval_output["eval_loss"])
lowerCamelCase_ : Tuple = perplexity
lowerCamelCase_ : int = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt")
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , "w") as writer:
logger.info("***** Eval results *****")
for key, value in sorted(results.items()):
logger.info(F""" {key} = {value}""")
writer.write(F"""{key} = {value}\n""")
return results
def __magic_name__ ( lowerCAmelCase_):
    '''xla_spawn (TPU) entry point: the index argument is required by spawn() but unused.'''
    # NOTE(review): `main` is the training entry point defined earlier in this file.
    main()
# Script entry point: run training/evaluation when executed directly.
if __name__ == "__main__":
    main()
| 73 | 1 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def __magic_name__ ( lowerCAmelCase_):
    """Factory registered via ``argparse.set_defaults(func=...)``: build the
    convert command object from the parsed CLI namespace.

    NOTE(review): ``ConvertCommand`` is not defined under that name in this
    renamed file (the command class below is ``lowerCAmelCase__``) — confirm
    the intended target before relying on this at runtime.
    """
    # Bind the actual parameter to the name the attribute accesses below use;
    # the original body read an undefined `args`.
    args = lowerCAmelCase_
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name)
# Error message raised when a TF->PyTorch conversion needs TensorFlow but it
# is not installed (string content is user-facing and left untouched).
__magic_name__ = '''
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class lowerCAmelCase__ ( __lowerCamelCase ):
    """`transformers-cli convert` command: converts an original (mostly TF)
    checkpoint into a Transformers PyTorch checkpoint for the chosen model type.

    NOTE(review): this file looks machine-renamed. Several defects to confirm:
    - methods declare duplicate `a_` parameters (a SyntaxError in Python);
    - results are bound to `lowerCamelCase_` / `self` attributes are never set,
      while later lines read the intended original names (`train_parser`,
      `self._logger`, `self._model_type`, ...), which would raise NameError.
    """

    @staticmethod
    def _UpperCamelCase ( a_ ):
        # Register the "convert" sub-command on the given sub-parser factory.
        # NOTE(review): the sub-parser is bound to `lowerCamelCase_` but the
        # following lines use `train_parser` — binding lost in the renaming.
        lowerCamelCase_ : List[Any] = parser.add_parser(
            "convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
        train_parser.add_argument("--model_type" , type=a_ , required=a_ , help="Model's type." )
        train_parser.add_argument(
            "--tf_checkpoint" , type=a_ , required=a_ , help="TensorFlow checkpoint path or folder." )
        train_parser.add_argument(
            "--pytorch_dump_output" , type=a_ , required=a_ , help="Path to the PyTorch saved model output." )
        train_parser.add_argument("--config" , type=a_ , default="" , help="Configuration file path or folder." )
        train_parser.add_argument(
            "--finetuning_task_name" , type=a_ , default=a_ , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
        train_parser.set_defaults(func=a_ )

    def __init__( self , a_ , a_ , a_ , a_ , a_ , *a_ , ):
        # Store CLI arguments on the instance for run().
        # NOTE(review): duplicate `a_` parameters; the locals read below
        # (`model_type`, `tf_checkpoint`, ...) are never bound.
        lowerCamelCase_ : str = logging.get_logger("transformers-cli/converting" )
        self._logger.info(F"""Loading model {model_type}""" )
        lowerCamelCase_ : List[str] = model_type
        lowerCamelCase_ : Dict = tf_checkpoint
        lowerCamelCase_ : List[str] = pytorch_dump_output
        lowerCamelCase_ : Optional[Any] = config
        lowerCamelCase_ : Optional[int] = finetuning_task_name

    def _UpperCamelCase ( self ):
        # Dispatch on model type; each branch lazily imports its converter so
        # TensorFlow is only required for the types that actually need it.
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                # Re-raised with the module-level TF-missing message.
                raise ImportError(a_ )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(a_ )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(a_ )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "t5":
            try:
                from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(a_ )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(a_ )
            # NOTE(review): both branches below are identical; upstream they
            # distinguish a checkpoint path from a dataset file — the
            # distinction was lost in the renaming. Confirm against upstream.
            if "ckpt" in self._tf_checkpoint.lower():
                lowerCamelCase_ : int = self._tf_checkpoint
                lowerCamelCase_ : Optional[int] = ""
            else:
                lowerCamelCase_ : int = self._tf_checkpoint
                lowerCamelCase_ : Optional[int] = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                a_ , self._config , self._pytorch_dump_output , a_ )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
                    convert_gpta_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(a_ )
            convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(a_ )
            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class lowerCAmelCase__ :
    """Mutable state for the Flax Karras-Ve scheduler (inference-step count,
    sigma schedule and timestep array).

    NOTE(review): all three fields were renamed to the same identifier
    `__UpperCAmelCase`, so only one dataclass field survives — upstream these
    are `num_inference_steps`, `schedule`, and `timesteps`. Confirm.
    """

    # setable values
    __UpperCAmelCase : Optional[int] = None
    __UpperCAmelCase : Optional[jnp.ndarray] = None
    __UpperCAmelCase : Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def _UpperCamelCase ( cls ):
        # Alternate constructor: a state with every field at its default.
        return cls()
@dataclass
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Scheduler step output: previous sample, derivative, and updated state.

    NOTE(review): the three fields share one renamed identifier (only the
    last survives) and `KarrasVeSchedulerState` is not defined under that
    name in this renamed file — confirm against upstream.
    """

    __UpperCAmelCase : jnp.ndarray
    __UpperCAmelCase : jnp.ndarray
    __UpperCAmelCase : KarrasVeSchedulerState
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase ):
    """Flax implementation of the stochastic Karras et al. (2022) VE sampler.

    NOTE(review): machine-renamed file — several methods declare duplicate
    `a_` parameters (a SyntaxError) and read locals (`timesteps`, `state`,
    `sigma`, `sample`, `eps`, `gamma`, `sigma_hat`, ...) that are never bound
    under these renames. Comments below describe the apparent upstream intent.
    """

    @property
    def _UpperCamelCase ( self ):
        # This scheduler carries explicit state (has_state).
        return True

    @register_to_config
    def __init__( self , a_ = 0.02 , a_ = 100 , a_ = 1.0_07 , a_ = 80 , a_ = 0.05 , a_ = 50 , ):
        # Config values (sigma_min/max, s_noise, s_churn, s_min, s_max) are
        # stored by @register_to_config; nothing else to initialize.
        pass

    def _UpperCamelCase ( self ):
        # Create a fresh, empty scheduler state.
        return KarrasVeSchedulerState.create()

    def _UpperCamelCase ( self , a_ , a_ , a_ = () ):
        # set_timesteps: build the descending timestep array and the matching
        # sigma schedule sigma(t_i) = sigma_max * (sigma_min/sigma_max)^(i/(N-1)).
        lowerCamelCase_ : List[Any] = jnp.arange(0 , a_ )[::-1].copy()
        lowerCamelCase_ : List[str] = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=a_ , schedule=jnp.array(a_ , dtype=jnp.floataa ) , timesteps=a_ , )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , ):
        # add_noise_to_input: raise sigma by gamma (churn) and add matching
        # noise eps ~ N(0, s_noise^2) so the perturbed sample has sigma_hat.
        if self.config.s_min <= sigma <= self.config.s_max:
            lowerCamelCase_ : Union[str, Any] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
        else:
            lowerCamelCase_ : Optional[int] = 0
        # sample eps ~ N(0, S_noise^2 * I)
        lowerCamelCase_ : Union[str, Any] = random.split(a_ , num=1 )
        lowerCamelCase_ : str = self.config.s_noise * random.normal(key=a_ , shape=sample.shape )
        lowerCamelCase_ : List[str] = sigma + gamma * sigma
        lowerCamelCase_ : Tuple = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ = True , ):
        # step: first-order (Euler) update from sigma_hat to sigma_prev.
        lowerCamelCase_ : List[str] = sample_hat + sigma_hat * model_output
        lowerCamelCase_ : Union[str, Any] = (sample_hat - pred_original_sample) / sigma_hat
        lowerCamelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ = True , ):
        # step_correct: second-order (Heun) correction averaging the two
        # derivative estimates.
        lowerCamelCase_ : Optional[Any] = sample_prev + sigma_prev * model_output
        lowerCamelCase_ : Any = (sample_prev - pred_original_sample) / sigma_prev
        lowerCamelCase_ : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ):
        # add_noise is not supported by this scheduler.
        raise NotImplementedError()
| 73 | 1 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
__magic_name__ : Union[str, Any] = namedtuple('''covid_data''', '''cases deaths recovered''')
def __magic_name__ ( lowerCAmelCase_ = "https://www.worldometers.info/coronavirus/"):
    """Fetch the worldometers page at the given URL and return the three
    headline counters (cases, deaths, recovered) as a covid_data record.

    NOTE(review): `covid_data` is the namedtuple defined above, which this
    renamed file actually binds to a different name — confirm the binding.
    """
    # XPath selecting the text of the three "maincounter-number" spans.
    lowerCamelCase_ : Tuple = "//div[@class = \"maincounter-number\"]/span/text()"
    # The original body passed an undefined name to both requests.get and
    # .xpath; use the URL parameter and the XPath string defined above.
    return covid_data(*html.fromstring(requests.get(lowerCAmelCase_).content).xpath(lowerCamelCase_))
# Report template filled with the three scraped counters, in field order.
# NOTE(review): the template is bound to `__magic_name__` while the print call
# reads `fmt` and calls `covid_stats` — both bindings lost in the renaming.
__magic_name__ : Tuple = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
| 700 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
    """Fast CPU tests for StableDiffusionDiffEditPipeline using tiny dummy models.

    NOTE(review): machine-renamed file — helper methods declare duplicate `a_`
    parameters (a SyntaxError) and read locals (`unet`, `scheduler`, `mask`,
    `latents`, `pipe`, ...) that are never bound under these renames.
    """

    __UpperCAmelCase : Any = StableDiffusionDiffEditPipeline
    __UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
    __UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
    __UpperCAmelCase : List[Any] = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    __UpperCAmelCase : List[str] = frozenset([] )

    def _UpperCamelCase ( self ):
        # Build the dict of tiny components (UNet, schedulers, VAE, text encoder,
        # tokenizer) the pipeline needs.
        torch.manual_seed(0 )
        lowerCamelCase_ : str = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a_ , )
        lowerCamelCase_ : str = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , )
        lowerCamelCase_ : Dict = DDIMInverseScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_zero=a_ , )
        torch.manual_seed(0 )
        lowerCamelCase_ : List[Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        lowerCamelCase_ : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        lowerCamelCase_ : Optional[Any] = CLIPTextModel(a_ )
        lowerCamelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        lowerCamelCase_ : Optional[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def _UpperCamelCase ( self , a_ , a_=0 ):
        # Seeded inputs for the inpainting call (mask + latents path).
        lowerCamelCase_ : str = floats_tensor((1, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
        lowerCamelCase_ : List[Any] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
        if str(a_ ).startswith("mps" ):
            lowerCamelCase_ : List[Any] = torch.manual_seed(a_ )
        else:
            lowerCamelCase_ : List[str] = torch.Generator(device=a_ ).manual_seed(a_ )
        lowerCamelCase_ : Tuple = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def _UpperCamelCase ( self , a_ , a_=0 ):
        # Seeded inputs for generate_mask().
        lowerCamelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
        lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCamelCase_ : Any = Image.fromarray(np.uinta(a_ ) ).convert("RGB" )
        if str(a_ ).startswith("mps" ):
            lowerCamelCase_ : Tuple = torch.manual_seed(a_ )
        else:
            lowerCamelCase_ : List[Any] = torch.Generator(device=a_ ).manual_seed(a_ )
        lowerCamelCase_ : int = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def _UpperCamelCase ( self , a_ , a_=0 ):
        # Seeded inputs for invert().
        lowerCamelCase_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
        lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCamelCase_ : Optional[int] = Image.fromarray(np.uinta(a_ ) ).convert("RGB" )
        if str(a_ ).startswith("mps" ):
            lowerCamelCase_ : Optional[int] = torch.manual_seed(a_ )
        else:
            lowerCamelCase_ : Tuple = torch.Generator(device=a_ ).manual_seed(a_ )
        lowerCamelCase_ : Union[str, Any] = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs

    def _UpperCamelCase ( self ):
        # Save/load round-trip must preserve outputs when optional components
        # are set to None.
        if not hasattr(self.pipeline_class , "_optional_components" ):
            return
        lowerCamelCase_ : List[Any] = self.get_dummy_components()
        lowerCamelCase_ : int = self.pipeline_class(**a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(a_ , a_ , a_ )
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
        lowerCamelCase_ : int = self.get_dummy_inputs(a_ )
        lowerCamelCase_ : int = pipe(**a_ )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(a_ )
            lowerCamelCase_ : Optional[int] = self.pipeline_class.from_pretrained(a_ )
        pipe_loaded.to(a_ )
        pipe_loaded.set_progress_bar_config(disable=a_ )
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(a_ , a_ ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
        lowerCamelCase_ : List[str] = self.get_dummy_inputs(a_ )
        lowerCamelCase_ : Optional[int] = pipe_loaded(**a_ )[0]
        lowerCamelCase_ : Optional[int] = np.abs(output - output_loaded ).max()
        self.assertLess(a_ , 1E-4 )

    def _UpperCamelCase ( self ):
        # generate_mask() shape and slice check against an expected constant.
        lowerCamelCase_ : Optional[int] = "cpu"
        lowerCamelCase_ : int = self.get_dummy_components()
        lowerCamelCase_ : List[Any] = self.pipeline_class(**a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : Any = self.get_dummy_mask_inputs(a_ )
        lowerCamelCase_ : int = pipe.generate_mask(**a_ )
        lowerCamelCase_ : List[Any] = mask[0, -3:, -3:]
        self.assertEqual(mask.shape , (1, 16, 16) )
        lowerCamelCase_ : List[str] = np.array([0] * 9 )
        lowerCamelCase_ : Optional[int] = np.abs(mask_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(a_ , 1E-3 )
        self.assertEqual(mask[0, -3, -4] , 0 )

    def _UpperCamelCase ( self ):
        # invert() (DDIM inverse scheduler) slice check.
        lowerCamelCase_ : Optional[int] = "cpu"
        lowerCamelCase_ : Union[str, Any] = self.get_dummy_components()
        lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : Dict = self.get_dummy_inversion_inputs(a_ )
        lowerCamelCase_ : Dict = pipe.invert(**a_ ).images
        lowerCamelCase_ : str = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        lowerCamelCase_ : Dict = np.array(
            [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
        lowerCamelCase_ : Any = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(a_ , 1E-3 )

    def _UpperCamelCase ( self ):
        # Batch-of-one must match single-sample output within tolerance.
        super().test_inference_batch_single_identical(expected_max_diff=5E-3 )

    def _UpperCamelCase ( self ):
        # invert() with DPMSolver multistep (and its inverse) schedulers.
        lowerCamelCase_ : List[Any] = "cpu"
        lowerCamelCase_ : int = self.get_dummy_components()
        lowerCamelCase_ : int = {"beta_start": 0.0_00_85, "beta_end": 0.0_12, "beta_schedule": "scaled_linear"}
        lowerCamelCase_ : Optional[Any] = DPMSolverMultistepScheduler(**a_ )
        lowerCamelCase_ : List[str] = DPMSolverMultistepInverseScheduler(**a_ )
        lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : int = self.get_dummy_inversion_inputs(a_ )
        lowerCamelCase_ : str = pipe.invert(**a_ ).images
        lowerCamelCase_ : int = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        lowerCamelCase_ : Union[str, Any] = np.array(
            [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
        lowerCamelCase_ : str = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(a_ , 1E-3 )
@require_torch_gpu
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow GPU integration tests for StableDiffusionDiffEditPipeline against
    reference images (fruit -> pears edit).

    NOTE(review): machine-renamed file — several locals read below (`pipe`,
    `generator`, `mask_image`, `inv_latents`, ...) are never bound under
    these renames.
    """

    def _UpperCamelCase ( self ):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def _UpperCamelCase ( cls ):
        # Download and cache the 768x768 source image once for the class.
        lowerCamelCase_ : Dict = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
        lowerCamelCase_ : int = raw_image.convert("RGB" ).resize((768, 768) )
        lowerCamelCase_ : List[Any] = raw_image

    def _UpperCamelCase ( self ):
        # Full DiffEdit flow with DDIM schedulers; compare to reference image.
        lowerCamelCase_ : Dict = torch.manual_seed(0 )
        lowerCamelCase_ : Tuple = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
        lowerCamelCase_ : str = DDIMScheduler.from_config(pipe.scheduler.config )
        lowerCamelCase_ : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : str = "a bowl of fruit"
        lowerCamelCase_ : Optional[int] = "a bowl of pears"
        lowerCamelCase_ : List[Any] = pipe.generate_mask(
            image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
        lowerCamelCase_ : str = pipe.invert(
            prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ ).latents
        lowerCamelCase_ : List[str] = pipe(
            prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
        lowerCamelCase_ : List[str] = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png" ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5E-1

    def _UpperCamelCase ( self ):
        # Same flow with DPMSolver multistep schedulers and 25 steps.
        lowerCamelCase_ : Optional[Any] = torch.manual_seed(0 )
        lowerCamelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
        lowerCamelCase_ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        lowerCamelCase_ : str = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : Any = "a bowl of fruit"
        lowerCamelCase_ : Dict = "a bowl of pears"
        lowerCamelCase_ : Optional[Any] = pipe.generate_mask(
            image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
        lowerCamelCase_ : str = pipe.invert(
            prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ , num_inference_steps=25 , ).latents
        lowerCamelCase_ : Any = pipe(
            prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
        lowerCamelCase_ : List[str] = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png" ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5E-1
| 73 | 0 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
# Collect the flax example-script directories and add them to sys.path so the
# run_* modules can be imported directly by the tests below.
# NOTE(review): the list is bound to `__magic_name__`, but the following lines
# read `SRC_DIRS` — the original binding name was lost in this file's renaming.
__magic_name__ = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        '''text-classification''',
        '''language-modeling''',
        '''summarization''',
        '''token-classification''',
        '''question-answering''',
    ]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax
# Verbose logging so example-script output is visible in test logs.
logging.basicConfig(level=logging.DEBUG)
__magic_name__ = logging.getLogger()
def __magic_name__ ( ):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("-f")
lowerCamelCase_ : List[Any] = parser.parse_args()
return args.f
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_="eval"):
'''simple docstring'''
lowerCamelCase_ : int = os.path.join(__A , F"""{split}_results.json""")
if os.path.exists(__A):
with open(__A , "r") as f:
return json.load(__A)
raise ValueError(F"""can't find {path}""")
# Mirror logger output to stdout so pytest captures example-script logs.
# NOTE(review): the handler is bound to `__magic_name__` but added as
# `stream_handler` — the original binding name was lost in the renaming.
__magic_name__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCAmelCase__ ( _snake_case ):
    """End-to-end smoke tests for the flax example scripts: each test builds a
    CLI argv string, patches sys.argv, runs the script's main(), and checks
    the metrics written to ``{split}_results.json``.

    NOTE(review): machine-renamed file — ``patch.object(lowerCAmelCase__, "argv",
    lowerCAmelCase__)`` should patch ``sys.argv`` with the built ``testargs``
    list, and ``get_results`` is not defined under that name here. Confirm.
    """

    def _UpperCamelCase ( self ):
        # run_flax_glue on the MRPC fixture; accuracy floor 0.75.
        lowerCamelCase_ : str = self.get_auto_remove_tmp_dir()
        lowerCamelCase_ : str = F"""\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n """.split()
        with patch.object(lowerCAmelCase__ , "argv" , lowerCAmelCase__ ):
            run_flax_glue.main()
            lowerCamelCase_ : List[Any] = get_results(lowerCAmelCase__ )
            self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )

    @slow
    def _UpperCamelCase ( self ):
        # run_clm_flax on the sample text; perplexity ceiling 100.
        lowerCamelCase_ : Tuple = self.get_auto_remove_tmp_dir()
        lowerCamelCase_ : List[str] = F"""\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n """.split()
        with patch.object(lowerCAmelCase__ , "argv" , lowerCAmelCase__ ):
            run_clm_flax.main()
            lowerCamelCase_ : List[str] = get_results(lowerCAmelCase__ )
            self.assertLess(result["eval_perplexity"] , 100 )

    @slow
    def _UpperCamelCase ( self ):
        # run_summarization_flax on the xsum fixture; ROUGE floors.
        lowerCamelCase_ : Dict = self.get_auto_remove_tmp_dir()
        lowerCamelCase_ : Union[str, Any] = F"""\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n """.split()
        with patch.object(lowerCAmelCase__ , "argv" , lowerCAmelCase__ ):
            run_summarization_flax.main()
            lowerCamelCase_ : int = get_results(lowerCAmelCase__ , split="test" )
            self.assertGreaterEqual(result["test_rouge1"] , 10 )
            self.assertGreaterEqual(result["test_rouge2"] , 2 )
            self.assertGreaterEqual(result["test_rougeL"] , 7 )
            self.assertGreaterEqual(result["test_rougeLsum"] , 7 )

    @slow
    def _UpperCamelCase ( self ):
        # run_mlm_flax on the sample text; perplexity ceiling 42.
        lowerCamelCase_ : Any = self.get_auto_remove_tmp_dir()
        lowerCamelCase_ : Optional[Any] = F"""\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n """.split()
        with patch.object(lowerCAmelCase__ , "argv" , lowerCAmelCase__ ):
            run_mlm_flax.main()
            lowerCamelCase_ : Union[str, Any] = get_results(lowerCAmelCase__ )
            self.assertLess(result["eval_perplexity"] , 42 )

    @slow
    def _UpperCamelCase ( self ):
        # run_t5_mlm_flax; accuracy floor 0.42.
        lowerCamelCase_ : Any = self.get_auto_remove_tmp_dir()
        lowerCamelCase_ : Optional[Any] = F"""\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n """.split()
        with patch.object(lowerCAmelCase__ , "argv" , lowerCAmelCase__ ):
            run_ta_mlm_flax.main()
            lowerCamelCase_ : Optional[Any] = get_results(lowerCAmelCase__ )
            self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )

    @slow
    def _UpperCamelCase ( self ):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        lowerCamelCase_ : Dict = 7 if get_gpu_count() > 1 else 2
        lowerCamelCase_ : Any = self.get_auto_remove_tmp_dir()
        lowerCamelCase_ : Optional[Any] = F"""\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n """.split()
        with patch.object(lowerCAmelCase__ , "argv" , lowerCAmelCase__ ):
            run_flax_ner.main()
            lowerCamelCase_ : Tuple = get_results(lowerCAmelCase__ )
            self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
            self.assertGreaterEqual(result["eval_f1"] , 0.3 )

    @slow
    def _UpperCamelCase ( self ):
        # run_qa on the SQuAD v2 fixture; F1 and exact-match floors.
        lowerCamelCase_ : int = self.get_auto_remove_tmp_dir()
        lowerCamelCase_ : Optional[int] = F"""\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n """.split()
        with patch.object(lowerCAmelCase__ , "argv" , lowerCAmelCase__ ):
            run_qa.main()
            lowerCamelCase_ : Dict = get_results(lowerCAmelCase__ )
            self.assertGreaterEqual(result["eval_f1"] , 30 )
            self.assertGreaterEqual(result["eval_exact"] , 30 )
| 701 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCAmelCase__ ( unittest.TestCase ):
    """Unit tests for transformers.utils.backbone_utils: alignment of
    out_features/out_indices, their validation, and the BackboneMixin
    property setters.

    NOTE(review): machine-renamed file — several calls below pass `a_`, which
    is not bound in these zero-argument test methods (would raise NameError);
    upstream these were the local `stage_names` / `None` arguments.
    """

    def _UpperCamelCase ( self ):
        # get_aligned_output_features_output_indices: defaults, and alignment
        # from either side, including negative indices.
        lowerCamelCase_ : int = ["a", "b", "c"]
        # Defaults to last layer if both are None
        lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , a_ , a_ )
        self.assertEqual(a_ , ["c"] )
        self.assertEqual(a_ , [2] )
        # Out indices set to match out features
        lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
        self.assertEqual(a_ , ["a", "c"] )
        self.assertEqual(a_ , [0, 2] )
        # Out features set to match out indices
        lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
        self.assertEqual(a_ , ["a", "c"] )
        self.assertEqual(a_ , [0, 2] )
        # Out features selected from negative indices
        lowerCamelCase_ ,lowerCamelCase_ : Dict = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
        self.assertEqual(a_ , ["a", "c"] )
        self.assertEqual(a_ , [-3, -1] )

    def _UpperCamelCase ( self ):
        # verify_out_features_out_indices: every invalid combination raises;
        # a fully valid combination passes.
        # Stage names must be set
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
        # Out features must be a list
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
        # Out features must be a subset of stage names
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
        # Out indices must be a list or tuple
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
        # Out indices must be a subset of stage names
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
        # Out features and out indices must be the same length
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
        # Out features should match out indices
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
        # Out features and out indices should be in order
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )

    def _UpperCamelCase ( self ):
        # BackboneMixin: setting out_features or out_indices keeps the other
        # property in sync.
        lowerCamelCase_ : List[Any] = BackboneMixin()
        lowerCamelCase_ : List[Any] = ["a", "b", "c"]
        lowerCamelCase_ : Optional[int] = ["a", "c"]
        lowerCamelCase_ : Dict = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ["a", "c"] )
        self.assertEqual(backbone.out_indices , [0, 2] )
        # Check out features and indices are updated correctly
        lowerCamelCase_ : Union[str, Any] = ["a", "b"]
        self.assertEqual(backbone.out_features , ["a", "b"] )
        self.assertEqual(backbone.out_indices , [0, 1] )
        lowerCamelCase_ : str = [-3, -1]
        self.assertEqual(backbone.out_features , ["a", "c"] )
        self.assertEqual(backbone.out_indices , [-3, -1] )
| 73 | 0 |
from math import factorial


def solution(n: int = 20) -> int:
    """Return the number of lattice paths through an ``n`` x ``n`` grid
    (Project Euler problem 15).

    The answer is the central binomial coefficient C(2n, n): the middle entry
    of row 2n of Pascal's triangle.
    """
    total_steps = 2 * n  # a path is 2n moves: n right and n down
    half = total_steps // 2
    return int(factorial(total_steps) / (factorial(half) * factorial(total_steps - half)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
| 702 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowerCAmelCase__ ( unittest.TestCase ):
    """Smoke tests for the ``accelerate launch`` and ``accelerate test`` CLI.

    NOTE(review): class-attribute and method names were mangled; they are
    restored from how the method bodies reference them (``self.base_cmd``,
    ``cls.config_path``, ...) and from the unittest lifecycle contract
    (``setUpClass``/``tearDownClass``/``test_*``).
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    # Path of the helper script shipped next to accelerate.test_utils.
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        # Park any existing user default config so the tests start from a clean slate.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        # Restore the user's default config.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        # Copy the base command: appending to the shared class-level list would
        # leak "--multi_gpu" into every subsequent test.
        cmd = list(self.base_cmd)
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        # Every YAML config fixture must be launchable.
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class lowerCAmelCase__ ( unittest.TestCase ):
    """Tests for the ``accelerate tpu-config`` CLI in --debug mode.

    Each test runs the command with ``--debug`` (which prints the gcloud
    command instead of executing it) and asserts the printed command line.

    NOTE(review): attribute and method names were mangled; they are restored
    from how the bodies reference them (``self.gcloud``, ``self.base_output``,
    ...) and renamed ``test_*`` so unittest discovers them instead of the
    originals shadowing one another.
    """

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        # Old-style (v0.12.0) config files must still be accepted.
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        # An explicit --command overrides the commands from the config file.
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        # Multiple --command flags are chained with ';'.
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
| 73 | 0 |
# Conversion factors to joules for the supported energy units.
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}


def __magic_name__(from_type, to_type, value):
    """Convert ``value`` from ``from_type`` to ``to_type`` energy units.

    Both unit names must be keys of ``ENERGY_CONVERSION``; otherwise a
    ``ValueError`` listing the valid units is raised.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    # Convert to joules, then to the target unit.
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
| 703 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase__ ( DiffusionPipeline ):
    """Unconditional latent-diffusion pipeline (VQ-VAE decoder + UNet + DDIM scheduler).

    NOTE(review): the original signatures used the same mangled parameter name
    repeatedly (a SyntaxError) and the bodies referenced the real names; both
    are restored here, as is the ``DiffusionPipeline`` base imported above.
    """

    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        output_type="pil",
        return_dict=True,
        **kwargs,
    ):
        """Generate ``batch_size`` images by denoising random latents and decoding with the VQ-VAE."""
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 73 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__magic_name__ = logging.get_logger(__name__)
# Tokenizer resource tables. The original assigned all four dicts to the same
# mangled name (each clobbering the last) while the tokenizer class below
# reads them under the conventional constant names restored here.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-german-cased": (
            "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
        ),
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "distilbert-base-uncased": {"do_lower_case": True},
    "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
    "distilbert-base-cased": {"do_lower_case": False},
    "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
    "distilbert-base-german-cased": {"do_lower_case": False},
    "distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class lowerCAmelCase__ ( PreTrainedTokenizerFast ):
    """Fast DistilBERT tokenizer backed by HuggingFace *tokenizers*.

    NOTE(review): class attributes and the special-token hook methods are
    renamed to the names the ``PreTrainedTokenizerFast`` base class actually
    uses; the mangled originals could never be found by the base machinery.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-sync the backend normalizer when its saved state disagrees with
        # the options requested here (e.g. a cased checkpoint loaded with
        # do_lower_case=True).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]`` from one or two sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return token-type ids: zeros for the first sequence, ones for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the backend tokenizer's vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 704 |
import re
def __magic_name__(lowerCAmelCase_):
    """Return the complement of a DNA strand (A<->T, C<->G).

    Raises ``ValueError`` if the strand contains any character other than
    the uppercase bases A, T, C, G.
    """
    # Every character must be one of the four bases.
    if len(re.findall("[ATCG]", lowerCAmelCase_)) != len(lowerCAmelCase_):
        raise ValueError("Invalid Strand")

    # str.maketrans is a static method, so call it via the str type.
    return lowerCAmelCase_.translate(str.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 73 | 0 |
def _A(discount_rate, cash_flows):
    """Return the net present value of ``cash_flows`` at ``discount_rate``.

    Cash flow i is discounted by (1 + discount_rate) ** i; the result is
    rounded to 2 decimal places. Raises ``ValueError`` for a negative rate
    or an empty cash-flow list.
    """
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 705 |
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude, angle, radian_mode=False):
    """Resolve a polar force into Cartesian components ``[Fx, Fy]``.

    ``angle`` is interpreted in degrees unless ``radian_mode`` is True.
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces, location, eps=10**-1):
    """Return True when the net moment of ``forces`` applied at ``location`` is ~0.

    ``forces`` and ``location`` are (n, 2) arrays; each row's moment about the
    origin is the 2-D cross product x*Fy - y*Fx.
    """
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
| 73 | 0 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
__magic_name__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( DonutImageProcessor ):
    """Deprecated alias of ``DonutImageProcessor`` kept for backward compatibility.

    Emits a deprecation warning on construction, then behaves exactly like the
    image processor it subclasses (the mangled base is restored to the class
    imported above).
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,  # NOTE(review): original passed an undefined name here; FutureWarning matches the deprecation intent
        )
        super().__init__(*args, **kwargs)
| 706 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( ProcessorMixin ):
    """Processor wrapping a CLAP feature extractor and a RoBERTa tokenizer.

    NOTE(review): attribute and method names are restored to the
    ``ProcessorMixin`` contract (``feature_extractor_class``, ``batch_decode``,
    ...); the mangled originals shadowed one another and were invisible to the
    base class.
    """

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or extract features from ``audios``.

        Returns the tokenizer encoding (with ``input_features`` merged in when
        both modalities are given) or a ``BatchEncoding`` of audio features.
        """
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forward to the tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and feature-extractor input names, de-duplicated, order-preserving."""
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 73 | 0 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# The functions below read these under their conventional names; the original
# assigned both to one mangled name, leaving DIFFUSERS_PATH undefined.
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."
# This is to make sure the diffusers module imported is the one in the repo.
# This is to make sure the diffusers module imported is the one in the repo.
# (The original assigned the spec to a mangled name and then dereferenced the
# undefined name `spec`.)
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    """Return True while ``line`` still belongs to the definition being scanned.

    A line continues the block when it keeps the indentation, is (near-)empty,
    or is a dangling closing paren of a multi-line signature. Named per the
    call sites below (the def name itself was mangled).
    """
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Return the source of ``object_name`` (dotted path) from the local diffusers tree.

    Named per its call site below; locals restored from the mangled original.
    Raises ``ValueError`` when the module or the object cannot be located.
    """
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines)
            and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
# Compiled once at import; named per the usages in is_copy_consistent below
# (the original assigned all three to one mangled name, clobbering each other).
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    """Return the leading whitespace of the first non-empty line of ``code``.

    Named per its call sites below; returns "" for all-empty input.
    """
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Format ``code`` with black (119 columns) and restyle its docstrings.

    Indented code is wrapped in a dummy class so black accepts it, then the
    wrapper is stripped again. Named per its call site in is_copy_consistent.
    """
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    # NOTE(review): the mangled original had an invalid TargetVersion and an
    # undefined `preview` value; PY37/preview=True are assumed — confirm.
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    # NOTE(review): style_docstrings_in_code returns (code, changed) — confirm.
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """Check all `# Copied from` blocks of ``filename`` against their originals.

    Returns a list of ``[object_name, start_index]`` for every out-of-sync
    copy; when ``overwrite`` is True the file is rewritten with the fixed
    blocks instead. Named per its call site in check_copies; locals restored
    from the mangled original.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite=False):
    """Run is_copy_consistent on every Python file under DIFFUSERS_PATH.

    Raises with a summary of all inconsistent copies unless ``overwrite`` is
    True (in which case the files are fixed in place). Named per the __main__
    call site below.
    """
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    # CLI entry point: check (or, with --fix_and_overwrite, repair) the copies.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
| 707 |
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True if ``input_str`` contains every letter a-z (set-accumulation variant)."""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True if ``input_str`` is a pangram (flag-array variant).

    The mangled original lost the indexed flag assignments; they are restored
    so each seen letter marks its slot in the 26-entry flag list.
    """
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True if ``input_str`` is a pangram (set-comprehension variant).

    NOTE: counts any character for which str.isalpha() is True, so non-ASCII
    letters can contribute to the 26-letter tally.
    """
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark the three pangram implementations with timeit."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 73 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
# Map of checkpoint name -> config URL. The original assigned this to the
# same mangled name as the logger above, clobbering it; the conventional
# constant name is restored here.
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}
class lowerCAmelCase__ ( PretrainedConfig ):
    """Configuration class for Swin Transformer V2 models.

    NOTE(review): the mangled base/attribute names are restored to the
    ``PretrainedConfig`` contract (``model_type``, ``attribute_map``), and the
    duplicated ``a_`` parameters (a SyntaxError) are replaced by the names the
    body assigns.
    """

    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 708 |
# Conversion factors to joules for the supported energy units.
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}


def __magic_name__(from_type, to_type, value):
    """Convert ``value`` from ``from_type`` to ``to_type`` energy units.

    Both unit names must be keys of ``ENERGY_CONVERSION``; otherwise a
    ``ValueError`` listing the valid units is raised.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    # Convert to joules, then to the target unit.
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 73 | 0 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
# Module-level logger (name obfuscated; used as `logger` further down — TODO confirm intended binding).
__magic_name__ = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    """Abstract interface for a hyperparameter-search backend.

    The obfuscated original named every method ``_UpperCamelCase`` (each def
    shadowing the previous one) while ``ensure_available`` called
    ``self.is_available()`` / ``self.pip_install()`` — restored here.
    """

    # Short backend identifier, e.g. "optuna"; set by subclasses.
    name: str
    # Pip package name when it differs from `name`; `pip_install` falls back to `name`.
    pip_package: str = None

    @staticmethod
    def is_available():
        """Return True when the backend's library is importable."""
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        """Run the hyperparameter search for the given trainer."""
        raise NotImplementedError

    def default_hp_space(self, trial):
        """Return the default hyperparameter space for a trial."""
        raise NotImplementedError

    def ensure_available(self):
        """Raise RuntimeError when the backend's library is not installed."""
        if not self.is_available():
            raise RuntimeError(
                f"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )

    @classmethod
    def pip_install(cls):
        """Return the pip command line that installs this backend."""
        return f"""`pip install {cls.pip_package or cls.name}`"""
class OptunaBackend(HyperParamSearchBackendBase):
    """Hyperparameter-search backend backed by Optuna."""

    name = "optuna"

    @staticmethod
    def is_available():
        # True when the `optuna` package can be imported.
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)
class RayTuneBackend(HyperParamSearchBackendBase):
    """Hyperparameter-search backend backed by Ray Tune."""

    name = "ray"
    # The installable extra differs from the import name.
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        # True when the `ray` package can be imported.
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)
class SigOptBackend(HyperParamSearchBackendBase):
    """Hyperparameter-search backend backed by SigOpt."""

    name = "sigopt"

    @staticmethod
    def is_available():
        # True when the `sigopt` package can be imported.
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)
class WandbBackend(HyperParamSearchBackendBase):
    """Hyperparameter-search backend backed by Weights & Biases sweeps."""

    name = "wandb"

    @staticmethod
    def is_available():
        # True when the `wandb` package can be imported.
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
# Registry mapping the HPSearchBackend enum member to its backend class.
# The obfuscated original bound this to `__magic_name__`, leaving the name
# used by `default_hp_search_backend` undefined.
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    """Pick the first installed hyperparameter-search backend and return its name.

    Logs an informational message when several backends are installed.
    Raises RuntimeError (with install instructions) when none is available.
    """
    available_backends = [
        backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()
    ]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"""{len(available_backends)} hyperparameter search backends available. Using {name} as the default.""")
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f""" - To install {backend.name} run {backend.pip_install()}"""
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()))
| 709 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
# NOTE(review): every constant below is bound to the same obfuscated name
# `__magic_name__`, so each assignment clobbers the previous one; originally
# these were distinct names (logger, VOCAB_FILES_NAMES, vocab map, sizes,
# segment ids) — restore before use.
__magic_name__ = logging.get_logger(__name__)
# Tokenizer resource files expected on disk / on the Hub.
__magic_name__ = {'''vocab_file''': '''spiece.model'''}
# Pretrained checkpoint name -> vocab file URL.
__magic_name__ = {
    '''vocab_file''': {
        '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
        '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
    }
}
# Max input sizes per checkpoint; None means no fixed positional limit.
__magic_name__ = {
    '''xlnet-base-cased''': None,
    '''xlnet-large-cased''': None,
}
# Segments (not really needed)
__magic_name__ = 0
__magic_name__ = 1
__magic_name__ = 2
__magic_name__ = 3
__magic_name__ = 4
class lowerCAmelCase__ ( __lowerCamelCase ):
    """SentencePiece-based tokenizer (XLNet style): left padding, NFKD
    normalization, and XLNet's `A + <sep> + B + <sep> + <cls>` input format.

    NOTE(review): names are machine-obfuscated — parameters are all `a_`
    (duplicate argument names are a SyntaxError) and locals are bound to
    `lowerCamelCase_` instead of attributes; the structure below documents
    intent only and needs de-obfuscation before it can run.
    """
    # Resource-file map / pretrained vocab map / max sizes / padding side.
    __UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
    __UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCAmelCase : Optional[int] = '''left'''
    # Loads the SentencePiece model and forwards special tokens to the base class.
    def __init__( self , a_ , a_=False , a_=True , a_=False , a_="<s>" , a_="</s>" , a_="<unk>" , a_="<sep>" , a_="<pad>" , a_="<cls>" , a_="<mask>" , a_=["<eop>", "<eod>"] , a_ = None , **a_ , ):
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCamelCase_ : str = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
        lowerCamelCase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
        lowerCamelCase_ : str = 3
        lowerCamelCase_ : Dict = do_lower_case
        lowerCamelCase_ : str = remove_space
        lowerCamelCase_ : Tuple = keep_accents
        lowerCamelCase_ : Dict = vocab_file
        lowerCamelCase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(a_ )
    # Vocabulary size comes straight from the SentencePiece model.
    @property
    def _UpperCamelCase ( self ):
        return len(self.sp_model )
    # token -> id map, including tokens added after loading.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : List[str] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    # The SentencePiece processor is not picklable; drop it and reload on unpickle.
    def __getstate__( self ):
        lowerCamelCase_ : Any = self.__dict__.copy()
        lowerCamelCase_ : Optional[int] = None
        return state
    def __setstate__( self , a_ ):
        lowerCamelCase_ : Union[str, Any] = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            lowerCamelCase_ : int = {}
        lowerCamelCase_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    # Whitespace cleanup, quote normalization, optional accent stripping / lowercasing.
    def _UpperCamelCase ( self , a_ ):
        if self.remove_space:
            lowerCamelCase_ : Optional[int] = " ".join(inputs.strip().split() )
        else:
            lowerCamelCase_ : str = inputs
        lowerCamelCase_ : Any = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            lowerCamelCase_ : Dict = unicodedata.normalize("NFKD" , a_ )
            lowerCamelCase_ : int = "".join([c for c in outputs if not unicodedata.combining(a_ )] )
        if self.do_lower_case:
            lowerCamelCase_ : Any = outputs.lower()
        return outputs
    # Tokenize with SentencePiece; the digit/comma special-casing re-splits pieces
    # like "9," so trailing digits keep their own piece.
    def _UpperCamelCase ( self , a_ ):
        lowerCamelCase_ : List[Any] = self.preprocess_text(a_ )
        lowerCamelCase_ : Optional[int] = self.sp_model.encode(a_ , out_type=a_ )
        lowerCamelCase_ : List[str] = []
        for piece in pieces:
            if len(a_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                lowerCamelCase_ : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_ , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        lowerCamelCase_ : int = cur_pieces[1:]
                    else:
                        lowerCamelCase_ : Union[str, Any] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(a_ )
            else:
                new_pieces.append(a_ )
        return new_pieces
    # token <-> id conversions delegate to the SentencePiece model.
    def _UpperCamelCase ( self , a_ ):
        return self.sp_model.PieceToId(a_ )
    def _UpperCamelCase ( self , a_ ):
        return self.sp_model.IdToPiece(a_ )
    # Join pieces and map the SentencePiece underline back to spaces.
    def _UpperCamelCase ( self , a_ ):
        lowerCamelCase_ : Dict = "".join(a_ ).replace(a_ , " " ).strip()
        return out_string
    # Decode ids to text; added tokens are rendered separately so byte-level and
    # unicode pieces are never mixed in one conversion.
    def _UpperCamelCase ( self , a_ , a_ = False , a_ = None , a_ = True , **a_ , ):
        lowerCamelCase_ : int = kwargs.pop("use_source_tokenizer" , a_ )
        lowerCamelCase_ : List[str] = self.convert_ids_to_tokens(a_ , skip_special_tokens=a_ )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        lowerCamelCase_ : Optional[int] = []
        lowerCamelCase_ : List[str] = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(a_ ) )
                    lowerCamelCase_ : Union[str, Any] = []
                sub_texts.append(a_ )
            else:
                current_sub_text.append(a_ )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(a_ ) )
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        lowerCamelCase_ : Union[str, Any] = "".join(a_ )
        lowerCamelCase_ : Optional[Any] = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            lowerCamelCase_ : List[Any] = self.clean_up_tokenization(a_ )
            return clean_text
        else:
            return text
    # XLNet input format: A + <sep> [+ B + <sep>] + <cls> (cls at the END).
    def _UpperCamelCase ( self , a_ , a_ = None ):
        lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCamelCase_ : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls
    # 1 marks special tokens, 0 marks sequence tokens.
    def _UpperCamelCase ( self , a_ , a_ = None , a_ = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
        if token_ids_a is not None:
            return ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1, 1]
        return ([0] * len(a_ )) + [1, 1]
    # Segment ids: 0 for sequence A, 1 for sequence B, 2 for the cls token.
    def _UpperCamelCase ( self , a_ , a_ = None ):
        lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCamelCase_ : Union[str, Any] = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
    # Save (copy or re-serialize) the SentencePiece model into `save_directory`.
    def _UpperCamelCase ( self , a_ , a_ = None ):
        if not os.path.isdir(a_ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCamelCase_ : Any = os.path.join(
            a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , a_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(a_ , "wb" ) as fi:
                lowerCamelCase_ : Dict = self.sp_model.serialized_model_proto()
                fi.write(a_ )
        return (out_vocab_file,)
| 73 | 0 |
from __future__ import annotations
import math
__magic_name__ = "2020.9.26"
__magic_name__ = "xcodz-dot, cclaus, dhruvmanila"
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
if not all(isinstance(lowerCamelCase__ , (float, int)) for val in locals().values()):
lowerCamelCase_ : Dict = F"""Input values must either be float or int: {list(locals().values())}"""
raise TypeError(lowerCamelCase__)
lowerCamelCase_ : List[str] = ((x * distance) / (z + distance)) * scale
lowerCamelCase_ : Dict = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
if not isinstance(lowerCamelCase__ , lowerCamelCase__):
raise TypeError("Axis must be a str")
lowerCamelCase_ : int = locals()
del input_variables["axis"]
if not all(isinstance(lowerCamelCase__ , (float, int)) for val in input_variables.values()):
lowerCamelCase_ : int = (
"Input values except axis must either be float or int: "
F"""{list(input_variables.values())}"""
)
raise TypeError(lowerCamelCase__)
lowerCamelCase_ : Union[str, Any] = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
lowerCamelCase_ : Any = x * math.cos(lowerCamelCase__) - y * math.sin(lowerCamelCase__)
lowerCamelCase_ : List[Any] = y * math.cos(lowerCamelCase__) + x * math.sin(lowerCamelCase__)
lowerCamelCase_ : List[Any] = z
elif axis == "x":
lowerCamelCase_ : Union[str, Any] = y * math.cos(lowerCamelCase__) - z * math.sin(lowerCamelCase__)
lowerCamelCase_ : Optional[int] = z * math.cos(lowerCamelCase__) + y * math.sin(lowerCamelCase__)
lowerCamelCase_ : Dict = x
elif axis == "y":
lowerCamelCase_ : Optional[int] = x * math.cos(lowerCamelCase__) - z * math.sin(lowerCamelCase__)
lowerCamelCase_ : Union[str, Any] = z * math.cos(lowerCamelCase__) + x * math.sin(lowerCamelCase__)
lowerCamelCase_ : Tuple = y
else:
raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }''')
print(f'''{rotate(1.0, 2.0, 3.0, "y", 90.0) = }''')
| 710 |
def get_bound(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return `min_val` when `option` is truthy, otherwise `max_val`.

    Asserts the argument types and raises ValueError when min > max.
    (Obfuscated original reused one parameter name three times — SyntaxError.)
    """
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Return the integer midpoint of the two numbers (truncated toward zero)."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Binary-search for `to_guess` inside (lower, higher), printing each step.

    Raises ValueError when the bounds are inverted or `to_guess` lies outside
    the open interval.
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")

    def answer(number) -> str:
        # Oracle comparing a trial `number` against the hidden target.
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"""guess the number : {last_numbers[-1]}""")
    print(f"""details : {last_numbers!s}""")


def main() -> None:
    """Read bounds and target from stdin, then run the guessing search."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
| 73 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Maps submodule name -> public names it exports; optional entries are added
# below only when their dependency (torch / flax) is installed. The obfuscated
# original bound every structure to `__magic_name__`, leaving
# `_import_structure` (used by _LazyModule) undefined — fixed here.
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both constants share the obfuscated name `__magic_name__`, so
# the checkpoint map clobbers the logger; originally distinct names.
__magic_name__ = logging.get_logger(__name__)

# Checkpoint name -> config URL for pretrained CvT models.
__magic_name__ = {
    '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration for the CvT (Convolutional vision Transformer) model.

    Most arguments are per-stage lists (three stages). Fixes the obfuscated
    `__init__`, whose parameters were all named `a_` (a SyntaxError) and whose
    values were bound to locals instead of attributes.
    """

    __UpperCAmelCase : List[str] = '''cvt'''

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 73 | 0 |
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
'''simple docstring'''
lowerCamelCase_ : List[str] = coefficient_matrix.shape
lowerCamelCase_ : List[str] = constant_matrix.shape
if rowsa != colsa:
lowerCamelCase_ : Optional[int] = F"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
raise ValueError(__lowerCAmelCase)
if colsa != 1:
lowerCamelCase_ : int = F"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
raise ValueError(__lowerCAmelCase)
if rowsa != rowsa:
lowerCamelCase_ : Any = (
"""Coefficient and constant matrices dimensions must be nxn and nx1 but """
F"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
)
raise ValueError(__lowerCAmelCase)
if len(__lowerCAmelCase) != rowsa:
lowerCamelCase_ : Optional[int] = (
"""Number of initial values must be equal to number of rows in coefficient """
F"""matrix but received {len(__lowerCAmelCase)} and {rowsa}"""
)
raise ValueError(__lowerCAmelCase)
if iterations <= 0:
raise ValueError("Iterations must be at least 1")
lowerCamelCase_ : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1)
lowerCamelCase_ : Any = table.shape
strictly_diagonally_dominant(__lowerCAmelCase)
# Iterates the whole matrix for given number of times
for _ in range(__lowerCAmelCase):
lowerCamelCase_ : Union[str, Any] = []
for row in range(__lowerCAmelCase):
lowerCamelCase_ : Optional[int] = 0
for col in range(__lowerCAmelCase):
if col == row:
lowerCamelCase_ : Dict = table[row][col]
elif col == cols - 1:
lowerCamelCase_ : Optional[Any] = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
lowerCamelCase_ : List[Any] = (temp + val) / denom
new_val.append(__lowerCAmelCase)
lowerCamelCase_ : List[str] = new_val
return [float(__lowerCAmelCase) for i in new_val]
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = table.shape
lowerCamelCase_ : List[Any] = True
for i in range(0 , __lowerCAmelCase):
lowerCamelCase_ : Dict = 0
for j in range(0 , cols - 1):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("Coefficient matrix is not strictly diagonally dominant")
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Maps submodule name -> public names it exports; tokenizer entries are added
# only when their optional dependency is installed. The obfuscated original
# bound the structure and export lists to `__magic_name__`, leaving
# `_import_structure` (used by _LazyModule) undefined — fixed here.
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 73 | 0 |
# Classic max-flow example graph; entry [i][j] is the capacity of edge i -> j.
# (The obfuscated original bound this to `__magic_name__`, leaving `test_graph`
# in the __main__ guard undefined.)
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Breadth-first search of the residual graph from `s`.

    Fills `parent` with the BFS tree and returns True when `t` is reachable.
    """
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            # Only follow edges with remaining (residual) capacity.
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Return the saturated edges of a minimum source-sink cut as (u, v) pairs.

    Runs Ford-Fulkerson with BFS augmenting paths until the sink is
    unreachable, then reports every original edge whose residual capacity
    dropped to zero. NOTE: `graph` is mutated in place; pass a copy if the
    capacities are needed afterwards.
    """
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
| 713 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Composite processor pairing an Encodec feature extractor with a T5 tokenizer
    (MusicGen-style): text goes to the tokenizer, audio to the feature extractor,
    and decoding strips padding from generated audio.

    NOTE(review): names are machine-obfuscated — several methods declare duplicate
    `a_` parameters (a SyntaxError) and locals are bound to `lowerCamelCase_`
    instead of meaningful names; structure documents intent only.
    """
    # Feature-extractor class name and accepted tokenizer class names.
    __UpperCAmelCase : Dict = '''EncodecFeatureExtractor'''
    __UpperCAmelCase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
    def __init__( self , a_ , a_ ):
        super().__init__(a_ , a_ )
        # Default to the feature extractor; `_in_target_context_manager` supports
        # the legacy as-target-processor pattern.
        lowerCamelCase_ : Optional[Any] = self.feature_extractor
        lowerCamelCase_ : Optional[int] = False
    # Delegates decoder-prompt construction to the tokenizer.
    def _UpperCamelCase ( self , a_=None , a_=None , a_=True ):
        return self.tokenizer.get_decoder_prompt_ids(task=a_ , language=a_ , no_timestamps=a_ )
    # Routes `text` to the tokenizer and `audio` to the feature extractor,
    # merging the results into a single encoding.
    def __call__( self , *a_ , **a_ ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*a_ , **a_ )
        lowerCamelCase_ : str = kwargs.pop("audio" , a_ )
        lowerCamelCase_ : List[str] = kwargs.pop("sampling_rate" , a_ )
        lowerCamelCase_ : Optional[Any] = kwargs.pop("text" , a_ )
        if len(a_ ) > 0:
            # First positional arg is treated as audio.
            lowerCamelCase_ : int = args[0]
            lowerCamelCase_ : str = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if text is not None:
            lowerCamelCase_ : Dict = self.tokenizer(a_ , **a_ )
        if audio is not None:
            lowerCamelCase_ : Optional[Any] = self.feature_extractor(a_ , *a_ , sampling_rate=a_ , **a_ )
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            # Merge audio values (and padding mask, when present) into the text encoding.
            lowerCamelCase_ : Dict = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                lowerCamelCase_ : int = audio_inputs["padding_mask"]
            return inputs
    # Batch decode: audio values go through `_decode_audio`, otherwise defer to the tokenizer.
    def _UpperCamelCase ( self , *a_ , **a_ ):
        lowerCamelCase_ : Dict = kwargs.pop("audio" , a_ )
        lowerCamelCase_ : Optional[Any] = kwargs.pop("padding_mask" , a_ )
        if len(a_ ) > 0:
            lowerCamelCase_ : Optional[int] = args[0]
            lowerCamelCase_ : Optional[Any] = args[1:]
        if audio_values is not None:
            return self._decode_audio(a_ , padding_mask=a_ )
        else:
            return self.tokenizer.batch_decode(*a_ , **a_ )
    # Plain text decode delegates to the tokenizer.
    def _UpperCamelCase ( self , *a_ , **a_ ):
        return self.tokenizer.decode(*a_ , **a_ )
    # Strips padded samples from a batch of generated audio, returning one
    # trimmed array per batch item.
    def _UpperCamelCase ( self , a_ , a_ = None ):
        lowerCamelCase_ : Any = to_numpy(a_ )
        # presumably (batch, channels, seq_len) — TODO confirm against the feature extractor
        lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : List[str] = audio_values.shape
        if padding_mask is None:
            return list(a_ )
        lowerCamelCase_ : Tuple = to_numpy(a_ )
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        lowerCamelCase_ : List[str] = seq_len - padding_mask.shape[-1]
        lowerCamelCase_ : int = 1 - self.feature_extractor.padding_value
        lowerCamelCase_ : List[Any] = np.pad(a_ , ((0, 0), (0, difference)) , "constant" , constant_values=a_ )
        lowerCamelCase_ : str = audio_values.tolist()
        for i in range(a_ ):
            # Keep only samples where the mask differs from the padding value.
            lowerCamelCase_ : Dict = np.asarray(audio_values[i] )[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            lowerCamelCase_ : Dict = sliced_audio.reshape(a_ , -1 )
        return audio_values
| 73 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both constants share the obfuscated name `_snake_case`, so the
# (empty) pretrained-config archive map clobbers the logger; originally distinct names.
_snake_case = logging.get_logger(__name__)
_snake_case = {}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration for LLaMA models.

    Fixes the obfuscated `__init__`, whose parameters were all named `a_`
    (a SyntaxError) and whose values were bound to locals instead of
    attributes, and restores the undefined `_UpperCAmelCase` references in
    `_rope_scaling_validation`.
    """

    __UpperCAmelCase : List[str] = '''llama'''
    __UpperCAmelCase : int = ['''past_key_values''']

    def __init__(
        self,
        vocab_size=3_2000,
        hidden_size=4096,
        intermediate_size=1_1008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        # Validate before handing control to the base config.
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _UpperCamelCase(self):
        """Validate the `rope_scaling` dict: {"type": "linear"|"dynamic", "factor": float > 1}."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )

    # Alias matching the original intent; the validator is called as
    # `_rope_scaling_validation` from `__init__`.
    _rope_scaling_validation = _UpperCamelCase
| 714 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Return the fractional part of `number`, keeping its sign.

    When `digit_amount` > 0 the fraction is rounded to that many digits;
    otherwise the raw floating-point remainder is returned. (The obfuscated
    original gave both parameters the same name — a SyntaxError.)
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 73 | 0 |
"""Simple XOR cipher: symmetric encryption/decryption of strings and files."""
from __future__ import annotations


class lowerCAmelCase__:
    """XOR cipher; the same key encrypts and decrypts.

    Fixes the obfuscated original: methods declared duplicate `a_` parameters
    (a SyntaxError), were all named `_UpperCamelCase` (each shadowing the
    previous), and `__init__` never stored the key on the instance.
    """

    def __init__(self, key: int = 0):
        # Optional default key; 0 means "no default" (a key of 1 is then used).
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """XOR every character of `content` with `key`; returns a list of chars."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """Inverse of `encrypt` (XOR is its own inverse)."""
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """XOR every character of `content` with `key`; returns a string."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """Inverse of `encrypt_string` (XOR is its own inverse)."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """Encrypt `file` line by line into 'encrypt.out'; False on I/O error."""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """Decrypt `file` line by line into 'decrypt.out'; False on I/O error."""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True


# Tests
# crypt = XORCipher()
# key = 67

# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))

# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))

# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))

# if (crypt.encrypt_file("test.txt",key)):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")

# if (crypt.decrypt_file("encrypt.out",key)):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
| 715 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """Holds the knobs for LayoutLMv3 image-processor tests and builds the
    kwargs dict used to instantiate the processor under test.

    The class name is restored from the mangled original: the test class below
    instantiates ``LayoutLMvaImageProcessingTester(self)``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # Default target size matches the processor's expected {"height", "width"} dict.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """Return the kwargs used to build the image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for the LayoutLMv3 image processor: attribute wiring, resizing of
    PIL/numpy/torch inputs, and the Tesseract OCR integration path.

    In the mangled original every method was named ``_UpperCamelCase`` (so later
    defs shadowed earlier ones and unittest discovered nothing); names are
    restored to the conventional ``test_*`` forms.
    """

    # Only usable when pytesseract is installed; otherwise the class attribute is None
    # and the require_* decorators skip the tests.
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        # Overriding `size` with an int should expand to a square height/width dict.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        # Batch-feature behaviour is covered by the shared image-processing tests.
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # apply_ocr defaults to True, so words/boxes must come back as lists.
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]]  # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], 
        [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 73 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Import structure for the lazy module: always-available submodules first.
# In the mangled original every assignment reused one name, so the optional
# entries clobbered each other instead of extending this dict.
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the lazy module below is used.
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    # The original assigned the proxy to a throwaway name, so it was never installed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 716 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    """Configuration for LUKE models.

    Parameter names are restored from the mangled original (every argument was
    named ``a_``, a SyntaxError); names and order follow the body's assignments.
    """

    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Construct a LukeConfig; extra kwargs are forwarded to PretrainedConfig."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Whether to use LUKE's entity-aware self-attention mechanism.
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 73 | 0 |
import os


def solution():
    """Return the first ten digits of the sum of the integers in ``num.txt``.

    The file is expected to contain one integer per line and to live next to
    this module (the original referenced an undefined name where ``__file__``
    belongs).
    """
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        # Sum every line as an integer, then keep only the leading 10 digits.
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
| 717 |
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__magic_name__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for datasets built from a Spark DataFrame.

    Name restored from the mangled original: the builder class below sets its
    config class to ``SparkConfig``, and ``_info`` reads ``config.features``.
    """

    # Optional explicit schema; when None, features are inferred from the DataFrame.
    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order):
    """Build a generator function yielding ``(example_id, row_dict)`` pairs.

    Partitions are visited in ``partition_order``; ids are
    ``"<partition_id>_<row_id>"`` so they stay unique across the DataFrame.
    (Function name restored: ``SparkExamplesIterable.__init__`` calls it.)
    """
    import pyspark

    def generate_fn():
        # Tag every row with the id of the partition it lives in.
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            # Collect one partition at a time to bound driver memory.
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    """Examples iterable backed by a Spark DataFrame — one shard per partition.

    Method and attribute names are restored from the mangled original (all
    methods shared the name ``_UpperCamelCase`` and shadowed each other).
    """

    def __init__(self, df, partition_order=None):
        self.df = df
        # Default to the natural partition order of the underlying RDD.
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator):
        """Return a copy that iterates partitions in a shuffled order."""
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id, num_workers):
        """Return a copy restricted to the partitions owned by one worker."""
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self):
        # One shard per Spark partition.
        return len(self.partition_order)
class lowerCAmelCase__ ( datasets.DatasetBuilder ):
    """Dataset builder that materializes a PySpark DataFrame into Arrow/Parquet shards.

    NOTE(review): identifiers in this class (``a_``, ``lowerCamelCase_``) look
    machine-mangled — repeated parameter/assignment names shadow each other, so
    the code as written cannot run. Code is kept byte-identical here; only
    documentation is added.
    """

    # Builder config class; it carries the optional ``features`` schema.
    __UpperCAmelCase : Any = SparkConfig

    def __init__( self , a_ , a_ = None , a_ = None , **a_ , ):
        # Stores the DataFrame and working dir; the cache key is derived from
        # the DataFrame's semantic hash so identical queries share a cache.
        import pyspark

        lowerCamelCase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
        lowerCamelCase_ : Optional[Any] = df
        lowerCamelCase_ : List[Any] = working_dir
        super().__init__(
            cache_dir=a_ , config_name=str(self.df.semanticHash() ) , **a_ , )

    def _UpperCamelCase ( self ):
        """Verify ``cache_dir`` is reachable from every Spark worker.

        On a local master this is a no-op; on a multi-node cluster a probe file
        is written from a worker and checked from the driver (i.e. the cache
        must be on a shared filesystem such as NFS). Raises ValueError otherwise.
        """
        # Returns the path of the created file.
        def create_cache_and_write_probe(a_ ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=a_ )
            # NOTE(review): ``uuid.uuida`` looks like a mangled ``uuid.uuid4`` — confirm.
            lowerCamelCase_ : Optional[Any] = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(a_ , "a" )
            return [probe_file]

        if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            lowerCamelCase_ : List[str] = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(a_ ).collect()
            )
            if os.path.isfile(probe[0] ):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )

    def _UpperCamelCase ( self ):
        """Dataset metadata: only the (optional) features schema from the config."""
        return datasets.DatasetInfo(features=self.config.features )

    def _UpperCamelCase ( self , a_ ):
        """Single TRAIN split — the whole DataFrame is one split."""
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]

    def _UpperCamelCase ( self , a_ ):
        """Repartition the DataFrame so each partition stays under ``max_shard_size``.

        The per-row Arrow size is estimated from a sample of at most 100 rows.
        """
        import pyspark

        def get_arrow_batch_size(a_ ):
            # Yield only the byte size of each Arrow batch (cheap size probe).
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )

        lowerCamelCase_ : str = self.df.count()
        lowerCamelCase_ : List[Any] = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        lowerCamelCase_ : Any = (
            self.df.limit(a_ )
            .repartition(1 )
            .mapInArrow(a_ , "batch_bytes: long" )
            .agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        lowerCamelCase_ : int = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            lowerCamelCase_ : Union[str, Any] = min(a_ , int(approx_total_size / max_shard_size ) )
            lowerCamelCase_ : int = self.df.repartition(a_ )

    def _UpperCamelCase ( self , a_ , a_ , a_ , ):
        """Write each Spark partition to shard files on the executors.

        Yields ``(task_id, (num_examples, num_bytes, num_shards, shard_lengths))``
        aggregated per Spark task.
        """
        import pyspark

        lowerCamelCase_ : str = ParquetWriter if file_format == "parquet" else ArrowWriter
        lowerCamelCase_ : int = os.path.join(self._working_dir , os.path.basename(a_ ) ) if self._working_dir else fpath
        lowerCamelCase_ : Optional[Any] = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        lowerCamelCase_ : int = self.config.features
        lowerCamelCase_ : Any = self._writer_batch_size
        lowerCamelCase_ : Tuple = self._fs.storage_options

        def write_arrow(a_ ):
            # Runs on executors: stream Arrow batches into shard writers, rolling
            # over to a new shard whenever ``max_shard_size`` is reached, and
            # yield one stats record per finished shard.
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            lowerCamelCase_ : List[Any] = pyspark.TaskContext().taskAttemptId()
            lowerCamelCase_ : Optional[int] = next(a_ , a_ )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
            lowerCamelCase_ : List[Any] = 0
            lowerCamelCase_ : Optional[int] = writer_class(
                features=a_ , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
            lowerCamelCase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
            writer.write_table(a_ )
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    # Current shard is full: finalize it, report stats, start the next one.
                    lowerCamelCase_ ,lowerCamelCase_ : List[str] = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
                    shard_id += 1
                    lowerCamelCase_ : List[str] = writer_class(
                        features=writer._features , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
                lowerCamelCase_ : Optional[int] = pa.Table.from_batches([batch] )
                writer.write_table(a_ )
            if writer._num_bytes > 0:
                # Flush the last, partially-filled shard.
                lowerCamelCase_ ,lowerCamelCase_ : Dict = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
            if working_fpath != fpath:
                # Move shards from the executor-local working dir to the final location.
                # NOTE(review): ``shutil`` is used here but not imported at module level — confirm.
                for file in os.listdir(os.path.dirname(a_ ) ):
                    lowerCamelCase_ : str = os.path.join(os.path.dirname(a_ ) , os.path.basename(a_ ) )
                    shutil.move(a_ , a_ )

        # Aggregate the per-shard stats emitted by write_arrow, grouped by task.
        lowerCamelCase_ : int = (
            self.df.mapInArrow(a_ , "task_id: long, num_examples: long, num_bytes: long" )
            .groupBy("task_id" )
            .agg(
                pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _UpperCamelCase ( self , a_ , a_ = "arrow" , a_ = None , a_ = None , **a_ , ):
        """Drive the split preparation: write shards, accumulate totals, then
        rename the shard files to the final ``-SSSSS-of-NNNNN`` pattern."""
        self._validate_cache_dir()
        lowerCamelCase_ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(a_ )
        lowerCamelCase_ : Dict = not is_remote_filesystem(self._fs )
        lowerCamelCase_ : List[str] = os.path.join if is_local else posixpath.join
        lowerCamelCase_ : Any = "-TTTTT-SSSSS-of-NNNNN"
        lowerCamelCase_ : List[Any] = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
        lowerCamelCase_ : int = path_join(self._output_dir , a_ )
        lowerCamelCase_ : int = 0
        lowerCamelCase_ : Optional[Any] = 0
        lowerCamelCase_ : int = 0
        lowerCamelCase_ : Dict = []
        lowerCamelCase_ : Any = []
        for task_id, content in self._prepare_split_single(a_ , a_ , a_ ):
            (
                (
                    lowerCamelCase_
                ) ,(
                    lowerCamelCase_
                ) ,(
                    lowerCamelCase_
                ) ,(
                    lowerCamelCase_
                ) ,
            ) : Tuple = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(a_ )
        lowerCamelCase_ : Dict = total_num_examples
        lowerCamelCase_ : Any = total_num_bytes
        # should rename everything at the end
        logger.debug(F"""Renaming {total_shards} shards.""" )
        if total_shards > 1:
            lowerCamelCase_ : List[Any] = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            lowerCamelCase_ : Any = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                a_ , a_ , a_ , ):
                rename(
                    a_ , fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace("TTTTT-SSSSS" , F"""{global_shard_id:05d}""" ).replace("NNNNN" , F"""{total_shards:05d}""" ) , )

            lowerCamelCase_ : Optional[int] = []
            lowerCamelCase_ : Dict = 0
            for i in range(len(a_ ) ):
                # Flatten (task_id, num_shards) pairs into one rename job per shard.
                lowerCamelCase_ ,lowerCamelCase_ : Tuple = task_id_and_num_shards[i]
                for shard_id in range(a_ ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(a_ , len(a_ ) ).map(lambda a_ : _rename_shard(*a_ ) ).collect()
        else:
            # don't use any pattern
            lowerCamelCase_ : int = 0
            lowerCamelCase_ : Optional[int] = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , fpath.replace(a_ , "" ) , )

    def _UpperCamelCase ( self , a_ , ):
        """Streaming path: iterate examples directly from the DataFrame partitions."""
        return SparkExamplesIterable(self.df )
| 73 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}


class MCTCTConfig(PretrainedConfig):
    """Configuration for the M-CTC-T speech-recognition model.

    Parameter names are restored from the mangled original (duplicate ``a_``
    arguments, ``lowerCamelCase_`` references); names/order follow the body's
    assignments. Validates that the convolution kernel list matches the number
    of convolutional layers.
    """

    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                F"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """
                F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
| 718 |
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph,
    v,
    visited_forward,
    visited_backward,
    cst_fwd,
    cst_bwd,
    queue,
    parent,
    shortest_distance,
):
    """Relax every edge leaving ``v`` in one search direction.

    Updates ``cst_fwd``/``parent``/``queue`` in place and returns the (possibly
    improved) best distance found so far via nodes already settled by the
    opposite search. (Names restored: the mangled original gave all nine
    parameters the same name, a SyntaxError, and the caller below references
    ``pass_and_relaxation``.)
    """
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            # ``nxt`` was settled by the opposite search: the two frontiers meet here.
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source, destination, graph_forward, graph_backward):
    """Bidirectional Dijkstra: expand from both ends, stop when frontiers cross.

    Returns the shortest-path distance from ``source`` to ``destination``,
    or -1 when no path exists.

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # Standard termination: once the two settled frontiers together exceed
        # the best meeting distance, no shorter path can appear.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 73 | 0 |
from collections.abc import Callable
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Tuple = a
lowerCamelCase_ : Any = b
if function(_A) == 0: # one of the a or b is a root for the function
return a
elif function(_A) == 0:
return b
elif (
function(_A) * function(_A) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError("could not find root in given interval.")
else:
lowerCamelCase_ : Tuple = start + (end - start) / 2.0
while abs(start - mid) > 10**-7: # until precisely equals to 10^-7
if function(_A) == 0:
return mid
elif function(_A) * function(_A) < 0:
lowerCamelCase_ : List[Any] = mid
else:
lowerCamelCase_ : Union[str, Any] = mid
lowerCamelCase_ : Dict = start + (end - start) / 2.0
return mid
def f(x: float) -> float:
    """Demo polynomial f(x) = x^3 - 2x - 5 (real root near 2.0945).

    Renamed from the obfuscated `__magic_name__` to match the call site in the
    `__main__` guard below (`bisection(f, 1, 1000)`).
    """
    return x**3 - 2 * x - 5
if __name__ == "__main__":
    # Demo: find the root of f on [1, 1000], then run the doctests.
    # NOTE(review): `bisection` and `f` are not defined under those names in
    # this obfuscated module (both defs above are named `__magic_name__`), so
    # as written this raises NameError when run as a script — confirm names.
    print(bisection(f, 1, 1000))
    import doctest
    doctest.testmod()
| 719 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
__magic_name__ = logging.get_logger(__name__)
# Map of pretrained CTRL checkpoints to their hosted config files.
# NOTE(review): this rebinds the same obfuscated name as the logger above,
# shadowing it — the two were presumably distinct identifiers originally.
__magic_name__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class lowerCAmelCase__(PretrainedConfig):
    """
    Configuration class for a CTRL-style model (stores vocab/layer/head sizes,
    dropout rates, etc.). Values are kept on the instance so the
    `attribute_map` aliases (e.g. `hidden_size` -> `n_embd`) resolve.

    Fixes relative to the obfuscated original:
    - base class restored to `PretrainedConfig` (imported at the top of the
      file; the mangled base name was undefined),
    - the three class attributes were all declared under one mangled name so
      only the last survived — restored to the conventional
      `model_type` / `keys_to_ignore_at_inference` / `attribute_map`,
    - `__init__` declared every parameter as `a_` (duplicate parameter names,
      a SyntaxError) and assigned the values to a throwaway local instead of
      `self`, discarding the whole configuration.
    """

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        # Store every value on self; PretrainedConfig consumes the remaining
        # generic kwargs (pad/bos/eos token ids, ...).
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 73 | 0 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
# Make torch/CUDA ops deterministic so the pipeline outputs below are reproducible.
enable_full_determinism()
class lowerCAmelCase__(unittest.TestCase):
    """Fast CPU checks for PNDMPipeline using a tiny randomly initialised UNet.

    Fixes relative to the obfuscated original: the undefined name
    `snake_case_` is replaced by the intended values (`torch_device`, `None`,
    the local unet/scheduler/generator variables, `return_dict=False`), and
    the property is renamed back to `dummy_uncond_unet`, which the test body
    already reads via `self.dummy_uncond_unet`.
    """

    @property
    def dummy_uncond_unet(self):
        # Seed before construction so the tiny model's weights are deterministic.
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    # NOTE(review): renamed from the obfuscated `_UpperCamelCase` so unittest
    # discovery picks it up — confirm the original test name.
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        # Run once through the dict output and once through the tuple output;
        # both must produce the same image for the same seed.
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class lowerCAmelCase__(unittest.TestCase):
    """Slow integration check: PNDMPipeline with the pretrained ddpm-cifar10-32 UNet.

    Fixes relative to the obfuscated original: the undefined name
    `snake_case_` is replaced by the intended values (the model id, local
    unet/scheduler/generator variables, `torch_device`, `disable=None`).
    """

    # NOTE(review): renamed from the obfuscated `_UpperCamelCase` so unittest
    # discovery picks it up — confirm the original test name.
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 720 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
# Script-level logger.
__magic_name__ = logging.getLogger(__name__)
# NOTE(review): the next two statements rebind the same obfuscated name, and
# the generator expression references `MODEL_CONFIG_CLASSES`, which is not
# defined under that identifier here (it was obfuscated away) — importing this
# module raises NameError. The originals were presumably
# MODEL_CONFIG_CLASSES / MODEL_TYPES; confirm.
__magic_name__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__magic_name__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase__:
    """
    Arguments pertaining to which model/config/tokenizer we are going to
    fine-tune, or train from scratch.

    Fixes relative to the obfuscated original: every field was declared under
    the same mangled name (so only the last survived), defaults pointed at an
    undefined mangled identifier (restored to None/True/False), and the
    validation method is restored to `__post_init__` so it actually runs after
    dataclass construction.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        # NOTE(review): MODEL_TYPES is the module-level tuple of supported
        # model types (obfuscated above) — confirm the identifier.
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    # presumably True as in the upstream example script — confirm
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    # presumably False as in the upstream example script — confirm
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        # --config_overrides is only meaningful when training from scratch, so
        # it conflicts with naming an existing config or checkpoint.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class lowerCAmelCase__:
    """
    Arguments pertaining to what data we are going to input our model for
    training and eval.

    Fixes relative to the obfuscated original: every field was declared under
    the same mangled name (so only the last survived), defaults pointed at an
    undefined mangled identifier (restored to None/False), and the validation
    method is restored to `__post_init__` so it actually runs after dataclass
    construction.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    # presumably False as in the upstream example script — confirm
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    # presumably False as in the upstream example script — confirm
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        # Only csv/json/txt inputs are supported by load_dataset below.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    """
    Attach whole-word-masking reference data to a tokenized dataset.

    `ref_file` contains one JSON value per (non-empty) line, one entry per
    dataset row. Returns a new `datasets.Dataset` with an extra "chinese_ref"
    column.

    Renamed from the obfuscated def (which also declared both parameters under
    one name, a SyntaxError) to match the call sites in main().
    """
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    # One reference entry per dataset row.
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
    """Entry point: parse arguments, load/tokenize data, then train and/or
    evaluate a masked language model with whole-word masking.

    Reconstructed from the obfuscated original, whose body referenced the
    undefined placeholder `lowerCAmelCase_` throughout and had lost several
    assignment targets (e.g. the `data_files[...]` keys); renamed to `main`
    to match the `__main__` guard at the bottom of the file.
    """
    # See all possible arguments in src/transformers/training_args.py, or by
    # passing --help to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            # Carve a validation split out of the train split.
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    # Make sure the embedding table covers the tokenizer's vocabulary.
    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines, then tokenize what remains.
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
def __magic_name__ ( lowerCAmelCase_):
    '''Spawn entry point (e.g. for xla_spawn on TPUs): ignores the process
    index argument and runs main().

    NOTE(review): `main` is not defined under that name in this obfuscated
    module (the entry point above is also named `__magic_name__`), so calling
    this raises NameError — confirm the intended target.
    '''
    main()
if __name__ == "__main__":
    main()
| 73 | 0 |
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    """
    Recursively pretty-print the structure of a (possibly nested) state dict.

    Dict values are recursed into with increased indentation; torch.Tensor
    values print their size; anything else is printed verbatim.

    :param name: key of the current entry, or None for the (unnamed) root
    :param val: dict / torch.Tensor / arbitrary value to print
    :param spaces: current indentation level

    Renamed from the obfuscated def, which declared all three parameters under
    one name (a SyntaxError) — the body already recursed via
    `recursive_print`, and main() calls it by this name.
    """
    # Format the message.
    if name is None:
        msg = None
    else:
        # Left-pad with dots and right-pad the name to a fixed 50-column field.
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """
    Permute a fused QKV weight/bias from a Megatron-LM checkpoint layout to the
    [num_splits * num_heads * hidden_size, :] layout expected downstream.

    :param param: the fused tensor; its leading dim is num_heads*num_splits*hidden_size
    :param checkpoint_version: Megatron checkpoint format version (1.0, >=2.0,
        or anything else for "already in the right order")
    :param num_splits: number of fused projections (3 for Q, K, V)
    :param num_heads: number of attention heads
    :param hidden_size: hidden size per head
    :return: the reordered tensor, with its original shape restored

    Renamed from the obfuscated def, which declared all five parameters under
    one name (a SyntaxError) — convert_megatron_checkpoint() calls it by this
    name.
    """
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    # Restore the original (flattened) shape regardless of the branch taken.
    param = param.view(*input_shape)
    return param
def convert_megatron_checkpoint(args, input_state_dict, config):
    """
    Convert a Megatron-LM GPT-2 checkpoint (`input_state_dict`) into a
    transformers GPT-2 state dict, updating `config` in place from the
    Megatron training args when they are stored in the checkpoint.

    :param args: parsed command-line args (unused here; kept for the caller's signature)
    :param input_state_dict: the raw torch-loaded Megatron checkpoint dict
    :param config: a GPT-2 config whose n_head/n_embd/vocab_size/... are read (and
        possibly overwritten from the checkpoint's own args)
    :return: dict mapping transformers parameter names to tensors

    Renamed from the obfuscated def, which declared all three parameters under
    one name (a SyntaxError); `torch.floataa` is fixed to `torch.float16`.
    """
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings, truncated to vocab_size rows.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings. [max_sequence_length, hidden_size]
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)
        # Stop if that's not a layer (e.g. the final layernorm keys).
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)
        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG: every layer up to config.n_layer must have been seen.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    """Entry point: load a Megatron-LM GPT-2 checkpoint (.zip or .pt), convert
    it to the transformers format, and save config + tokenizer + weights next
    to the input checkpoint.

    Reconstructed from the obfuscated original, whose body referenced the
    undefined placeholder `__UpperCamelCase` throughout; renamed to `main` to
    match the `__main__` guard below.
    """
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename: outputs are written next to the checkpoint.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        # NOTE(review): the boolean/None values below were lost to obfuscation
        # and restored to the upstream defaults — confirm.
        config = GPTaConfig(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPTaConfig.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f"Saving checkpoint to \"{output_checkpoint_file}\"")
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
    # NOTE(review): `main` is not defined under that name in this obfuscated
    # module (the entry point above is named `__magic_name__`), so running
    # this file directly raises NameError — confirm the intended target.
    main()
####################################################################################################
| 721 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    """Mutable (via `.replace`) sampling state for the Karras-VE scheduler.

    Restored from the obfuscated original: all three fields were declared
    under one mangled name (so only the last survived) and the factory method
    had lost its name. The real identifiers are grounded by the scheduler
    below, which calls `KarrasVeSchedulerState.create()` and
    `state.replace(num_inference_steps=..., schedule=..., timesteps=...)`.
    """

    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        # Fresh, empty state; fields are filled in by set_timesteps.
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """Output of a Karras-VE scheduler step.

    Restored from the obfuscated original: the class name and all three fields
    were mangled into single repeated identifiers. The real names are grounded
    by the scheduler below, which constructs
    `FlaxKarrasVeOutput(prev_sample=..., derivative=..., state=...)`;
    the base class is `BaseOutput` (imported at the top of the file).
    """

    # Denoised sample for the previous timestep.
    prev_sample: jnp.ndarray
    # Derivative used by the (optional) second-order correction step.
    derivative: jnp.ndarray
    # Updated scheduler state to thread through subsequent calls.
    state: KarrasVeSchedulerState
class lowerCAmelCase__(FlaxSchedulerMixin, ConfigMixin):
    """Flax scheduler implementing the stochastic (Karras-VE style) sampler.

    Fixes relative to the obfuscated original:
    - base classes restored to `FlaxSchedulerMixin, ConfigMixin` (both imported
      at the top of the file; the mangled base names were undefined),
    - `__init__` declared every parameter as `a_` (duplicate parameter names,
      a SyntaxError) — restored to the sigma/s_* names the bodies read back
      via `self.config.*`,
    - all methods were mangled to one shared name so only the last survived —
      restored to distinct conventional names (NOTE(review): confirm the
      original method names against callers),
    - `jnp.floataa` fixed to `jnp.float32`.
    """

    @property
    def has_state(self):
        # This scheduler threads an explicit state object through its calls.
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # All arguments are stored on self.config by @register_to_config.
        pass

    def create_state(self):
        """Return a fresh, empty scheduler state."""
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state, num_inference_steps, shape=()):
        """Return a new state with the timestep/sigma schedule filled in."""
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        # Geometric interpolation between sigma_max and sigma_min.
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state, sample, sigma, key):
        """Explode the sample to a higher noise level sigma_hat; returns
        (sample_hat, sigma_hat)."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        """First-order (Euler) step from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True):
        """Second-order (Heun) correction using the derivative at sample_prev."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        # Not supported by this scheduler.
        raise NotImplementedError()
| 73 | 0 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
# Module-level logger; the class below calls it as `logger.info(...)`.
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    """Lightning module that fine-tunes a sequence-classification model on a GLUE task."""

    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        # Cache the task's output mode on hparams so dataloaders can branch on it.
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            # Only these model types consume token_type_ids.
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        """Build (or reuse) cached feature files for the train and dev splits."""
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode, batch_size, shuffle=False):
        """Load a cached split into a DataLoader. Called after `prepare_data`."""
        # The "test" split is evaluated on dev data (no GLUE-server submission).
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        """Aggregate step outputs into metrics; returns (ret_dict, preds, labels)."""
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Extend the shared argparse parser with GLUE-specific options."""
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
def main():
    """Parse CLI args, train a GLUETransformer, and optionally predict on dev."""
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        # Checkpoints are sorted so the last one is the most recent epoch.
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
# Standard CLI entry point: fine-tune (and optionally evaluate) a GLUE model.
if __name__ == "__main__":
    main()
| 700 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Request fully deterministic execution so the numeric slice assertions below are reproducible.
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for StableDiffusionDiffEditPipeline using tiny random components."""

    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build the dict of tiny randomly-initialized pipeline components."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Inputs for the main __call__ path (mask + latents supplied directly)."""
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        """Inputs for `generate_mask`."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        """Inputs for `invert`."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        # Batched and single-sample inference must agree within a small tolerance.
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        # Swap in DPM-Solver schedulers and re-run the inversion check.
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the real stable-diffusion-2-1 checkpoint."""

    def tearDown(self):
        # Release GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        # Download and cache the fixture image once for all tests in the class.
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 73 | 0 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """
    Warp `img` with the affine transform mapping the three points `pt1` onto `pt2`.

    :param img: grayscale or color image
    :param pt1: 3x2 array of source points
    :param pt2: 3x2 array of destination points
    :param rows: output width passed to warpAffine
    :param cols: output height passed to warpAffine
    :return: the warped image
    """
    # NOTE: named `get_rotation` because the script below calls it by that name.
    rot_mat = cva.getAffineTransform(pt1, pt2)
    return cva.warpAffine(img, rot_mat, (rows, cols))
if __name__ == "__main__":
    # read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 701 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    """Unit tests for the backbone out_features / out_indices helpers."""

    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 73 | 0 |
import os
import sys
import transformers
# Quiet TensorFlow's C++ logging before any TF import.
# NOTE(review): restored from an orphaned `= "3"` assignment; `os` is imported
# above but otherwise unused, matching this env-var write — confirm against history.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)

print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
| 702 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """
    Test case for verifying that `accelerate launch` works with each bundled
    config; any user default config is moved aside for the duration of the class.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        # Temporarily hide the user's default config so tests start clean.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        # Restore the user's default config.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    """
    Test case for verifying that `accelerate tpu-config --debug` emits the
    expected `gcloud` command line.
    """

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                "echo \"Hello World\"",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""",
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )
| 73 | 0 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    # Build an UnCLIP image-variation pipeline by reusing the sub-modules of a
    # pretrained text-to-image UnCLIP pipeline, then save it to --dump_path.
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    # argparse exposes `--txt2img_unclip` as `args.txt2img_unclip`
    # (the original read the nonexistent attribute `txtaimg_unclip`).
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    # The image-variation pipeline additionally needs a CLIP image encoder and processor.
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 703 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase__ ( DiffusionPipeline ):
    """Unconditional latent-diffusion pipeline: a UNet denoises VQ-VAE latents,
    which are then decoded into images.

    The original definition repeated the parameter name ``a_`` in both
    signatures (a SyntaxError) and referenced never-bound names such as
    ``batch_size`` and ``eta``; the intended parameters are restored here.
    """

    def __init__(self, vqvae, unet, scheduler):
        """Register the pipeline modules.

        Args:
            vqvae: `VQModel` used to decode latents into images.
            unet: `UNet2DModel` noise-prediction network.
            scheduler: `DDIMScheduler` controlling the denoising steps.
        """
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        output_type="pil",
        return_dict=True,
        **kwargs,
    ):
        """Generate `batch_size` images.

        Returns:
            `ImagePipelineOutput` (or a 1-tuple when ``return_dict=False``).
        """
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers take `eta`
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 73 | 0 |
import torch
def main():
    """Report how many CUDA devices torch can see (0 when CUDA is unavailable).

    The function was previously named ``__magic_name__`` while the entry-point
    guard called the undefined ``main()``; the intended name is restored.
    """
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F"""Successfully ran on {num_gpus} GPUs""")


if __name__ == "__main__":
    main()
| 704 |
import re
def __magic_name__ ( dna ):
    """Return the complementary DNA strand (A<->T, C<->G).

    Args:
        dna: strand consisting only of the characters A, T, C, G.

    Raises:
        ValueError: if ``dna`` contains any other character.
    """
    # Every character must be a valid nucleotide, otherwise the strand is rejected.
    # (The body previously referenced the undefined name `dna` while the
    # parameter was called `lowerCAmelCase_`; the parameter name is restored.)
    if len(re.findall("[ATCG]" , dna)) != len(dna):
        raise ValueError("Invalid Strand")

    # str.maketrans builds the per-character complement mapping in one pass.
    return dna.translate(dna.maketrans("ATCG" , "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 73 | 0 |
def count_inversions_bf(arr):
    """Count inversions (pairs i < j with arr[i] > arr[j]) by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions in O(n log n) with a merge-sort style divide and conquer.

    Returns:
        tuple: (sorted copy of ``arr``, number of inversions in ``arr``).
    """
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    # Recurse on both halves; each call returns (sorted half, inversion count).
    sorted_p, inversions_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    merged, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)
    num_inversions = inversions_p + inversions_q + cross_inversions
    return merged, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists, counting pairs (x in p, y in q) with x > y.

    Returns:
        tuple: (merged sorted list, number of cross inversions).
    """
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    """Sanity-check both implementations against known inputs."""
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
| 705 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude, angle, radian_mode=False):
    """Resolve a force given in polar form into Cartesian components [fx, fy].

    Args:
        magnitude: force magnitude.
        angle: direction of the force; degrees by default, radians when ``radian_mode``.
        radian_mode: interpret ``angle`` as radians when True.
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces, location, eps=10**-1):
    """Return True when the net moment of ``forces`` applied at ``location`` is ~0.

    Args:
        forces: (n, 2) array of force vectors.
        location: (n, 2) array of application points.
        eps: tolerance on the summed moment.
    """
    # Each 2D cross product is the scalar moment of one force about the origin.
    moments = cross(location, forces)
    sum_moments = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
| 73 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import map: submodule name -> public names it provides. It was
# previously assigned to `__magic_name__` while `_LazyModule` below read the
# undefined `_import_structure`; the intended name is restored throughout.
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

# Vision-dependent members are only registered when the vision extras exist.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

# Torch-dependent members are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]


if TYPE_CHECKING:
    # Real imports for static type checkers; mirrors `_import_structure`.
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( ProcessorMixin ):
    """Bundles a CLAP feature extractor and a RoBERTa tokenizer into one processor.

    The original definition had duplicate ``a_`` parameters (a SyntaxError),
    assigned both mixin class attributes to the same name, and dropped the
    ``encoding["input_features"]`` assignment; all are restored here.
    """

    # Names ProcessorMixin uses to auto-load the two components.
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or extract features from ``audios``.

        Returns the tokenizer encoding (with ``input_features`` merged in when
        both modalities are given), or a `BatchEncoding` of audio features.

        Raises:
            ValueError: if neither ``text`` nor ``audios`` is provided.
        """
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, de-duplicated, order preserved.
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 73 | 0 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]


class ResNetConvLayer(nn.Module):
    """Convolution -> batch norm -> activation: the elementary ResNet block."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, activation="relu"):
        super().__init__()
        # "same" padding for odd kernels; the conv bias is redundant with batch norm.
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        # ACTaFN is the activation registry imported at the top of this file.
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, input):
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    """ResNet stem: 7x7/2 convolution followed by 3x3/2 max pooling."""

    def __init__(self, config):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    """1x1 convolution + batch norm used to project the residual when shapes change."""

    def __init__(self, in_channels, out_channels, stride=2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input):
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    """Two 3x3 convolutions with a residual connection (ResNet-18/34 style)."""

    def __init__(self, in_channels, out_channels, stride=1, activation="relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        # Project the residual only when the spatial size or channel count changes.
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            # Last conv has no activation: it is applied after adding the residual.
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    """Bottleneck block (ResNet-50+): 1x1 reduce -> 3x3 -> 1x1 expand, with residual.

    The first 1x1 reduces the channels by ``reduction`` to keep the 3x3 cheap.
    """

    def __init__(self, in_channels, out_channels, stride=1, activation="relu", reduction=4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            # Last conv has no activation: it is applied after adding the residual.
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    """A stack of ``depth`` identical layers; the first one downsamples."""

    def __init__(self, config, in_channels, out_channels, stride=2, depth=2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    """All ResNet stages after the stem, collecting per-stage hidden states."""

    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    """Base class handling weight init and pretrained-model loading for ResNet."""

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value


RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once from label dtype/shape, then cache it.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        """
        Returns:
            `BackboneOutput` with the feature maps of the stages listed in `config.out_features`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        # Always request hidden states internally: the feature maps are taken from them.
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
| 707 |
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True if ``input_str`` contains every letter of the English alphabet."""
    frequency = set()
    input_str = input_str.replace(" ", "")  # whitespace can never be a letter
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Pangram check using a fixed 26-slot flag table instead of a set."""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Pangram check via a single set comprehension over alphabetic characters."""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Time the three implementations against each other."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 73 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__magic_name__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( FlavaImageProcessor ):
    """Deprecated alias for `FlavaImageProcessor`, kept for backward compatibility.

    The original subclassed the undefined name ``_UpperCAmelCase`` and passed
    the undefined ``lowercase_`` to ``warnings.warn``; the imported base class
    and a `FutureWarning` category are restored.
    """

    def __init__(self, *args, **kwargs):
        # Emit a deprecation warning, then behave exactly like FlavaImageProcessor.
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 708 |
# Conversion factors to joules. Previously assigned to `__magic_name__` while
# the function below read the undefined `ENERGY_CONVERSION`; name restored.
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.05585,
    "footpound": 1.355_818,
}


def energy_conversion(from_type, to_type, value):
    """Convert ``value`` from one energy unit to another.

    Args:
        from_type: source unit, a key of ``ENERGY_CONVERSION``.
        to_type: target unit, a key of ``ENERGY_CONVERSION``.
        value: quantity expressed in ``from_type`` units.

    Raises:
        ValueError: if either unit name is unknown.

    The original had three parameters all named ``lowerCAmelCase_`` (a
    SyntaxError) while the body used the intended names restored here.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            f"""Valid values are: {', '.join(ENERGY_CONVERSION)}"""
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 73 | 0 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=4 , ):
lowerCamelCase_ : str = parent
lowerCamelCase_ : List[str] = batch_size
lowerCamelCase_ : str = seq_length
lowerCamelCase_ : str = is_training
lowerCamelCase_ : Dict = use_attention_mask
lowerCamelCase_ : str = use_token_type_ids
lowerCamelCase_ : List[str] = use_labels
lowerCamelCase_ : Optional[int] = vocab_size
lowerCamelCase_ : Tuple = hidden_size
lowerCamelCase_ : Optional[int] = num_hidden_layers
lowerCamelCase_ : Tuple = num_attention_heads
lowerCamelCase_ : Any = intermediate_size
lowerCamelCase_ : str = hidden_act
lowerCamelCase_ : Any = hidden_dropout_prob
lowerCamelCase_ : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase_ : Optional[int] = max_position_embeddings
lowerCamelCase_ : Tuple = type_vocab_size
lowerCamelCase_ : List[str] = type_sequence_label_size
lowerCamelCase_ : List[Any] = initializer_range
lowerCamelCase_ : str = num_choices
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : List[Any] = None
if self.use_attention_mask:
lowerCamelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ : Dict = None
if self.use_token_type_ids:
lowerCamelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ : List[Any] = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCamelCase ( self ):
lowerCamelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
lowerCamelCase_ : int = config_and_inputs
lowerCamelCase_ : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase__ ( a__, unittest.TestCase ):
    """Common-suite test harness for the Flax Albert model family.

    NOTE(review): ``FlaxAlbertForQuestionAnswering`` appears twice in the tuple
    below — presumably one entry was a different head class before an automated
    rename; confirm against the upstream test file.
    """

    # Model classes exercised by the shared Flax test mixin; empty when flax is absent.
    __UpperCAmelCase : Optional[int] = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def _UpperCamelCase ( self ):
        # Set up the fixture tester used by the inherited common tests.
        # NOTE(review): the tester instance is bound to a throwaway local, not to
        # ``self.model_tester`` as the common suite expects — TODO confirm upstream.
        lowerCamelCase_ : Dict = FlaxAlbertModelTester(self )

    @slow
    def _UpperCamelCase ( self ):
        # Smoke test: each model class loads from the hub and runs one forward pass.
        for model_class_name in self.all_model_classes:
            lowerCamelCase_ : Optional[int] = model_class_name.from_pretrained("albert-base-v2" )
            lowerCamelCase_ : Tuple = model(np.ones((1, 1) ) )
            self.assertIsNotNone(lowerCAmelCase__ )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow integration test: checks a few hidden-state values of albert-base-v2 against known constants."""

    @slow
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : Any = FlaxAlbertModel.from_pretrained("albert-base-v2" )
        # One tokenized sentence (11 tokens) and its all-ones attention mask.
        lowerCamelCase_ : Union[str, Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        lowerCamelCase_ : int = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        lowerCamelCase_ : Any = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
        # Expected last-hidden-state shape: (batch=1, seq_len=11, hidden=768).
        lowerCamelCase_ : List[Any] = (1, 11, 768)
        self.assertEqual(output.shape , lowerCAmelCase__ )
        # Reference slice of hidden-state values recorded from a known-good run.
        lowerCamelCase_ : Optional[int] = np.array(
            [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCAmelCase__ , atol=1E-4 ) )
| 709 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''vocab_file''': '''spiece.model'''}
__magic_name__ = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
__magic_name__ = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
__magic_name__ = 0
__magic_name__ = 1
__magic_name__ = 2
__magic_name__ = 3
__magic_name__ = 4
class lowerCAmelCase__ ( __lowerCamelCase ):
    """SentencePiece-based XLNet-style tokenizer (left-padded).

    Fixes over the previous revision:
    - every method declared several parameters under the same name ``a_``,
      which is a SyntaxError (duplicate argument names); distinct, meaningful
      names are restored,
    - configuration values were bound to throwaway annotated locals instead of
      instance attributes; they are now stored on ``self``,
    - ``convert_tokens_to_string`` called ``str.replace`` with the token *list*
      as the pattern (TypeError); it now strips ``SPIECE_UNDERLINE``,
    - the four class attributes all shared one name (only the last survived);
      the standard tokenizer class-attribute names are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs=None,
        **kwargs,
    ):
        """Load the SentencePiece model from ``vocab_file`` and register the special tokens."""
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        # XLNet uses segment id 3 for padding (segments 0/1 are the sequences, 2 is <cls>).
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """Size of the underlying SentencePiece vocabulary."""
        return len(self.sp_model)

    def get_vocab(self):
        """Return the token -> id mapping, including tokens added after loading."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # For backward compatibility with instances pickled before sp_model_kwargs existed.
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize raw text (whitespace, quote style, accents, casing) before tokenization."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", "\"").replace("''", "\"")
        if not self.keep_accents:
            # Decompose accented characters and drop the combining marks.
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text):
        """Tokenize into SentencePiece pieces, re-splitting pieces that end in ``<digit>,``."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                # Re-encode the numeric part without the trailing comma so numbers
                # like "2000," tokenize consistently with "2000".
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the SentencePiece vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an id (int) to a token (str) using the SentencePiece vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join pieces and turn SentencePiece underline markers back into spaces."""
        # Fix: the previous revision passed the token list itself to str.replace().
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        spaces_between_special_tokens=True,
        **kwargs,
    ):
        """Turn ids back into text, keeping added tokens verbatim."""
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens.
        text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """XLNet format: ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions and 0 at sequence positions."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: sequence A -> 0, sequence B -> 1, trailing ``<cls>`` -> 2."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the SentencePiece model file into ``save_directory``; return the path tuple."""
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file on disk (e.g. loaded from a serialized proto): dump the model bytes.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 73 | 0 |
import logging
from transformers import PretrainedConfig
__magic_name__ = logging.getLogger(__name__)
__magic_name__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
    """Configuration for the BertAbs abstractive-summarization model.

    Fixes over the previous revision: every ``__init__`` parameter was declared
    under the same name ``a_`` (a SyntaxError), values were bound to throwaway
    locals instead of ``self``, and ``super().__init__`` received an undefined
    name instead of ``**kwargs``.
    """

    model_type = '''bertabs'''

    def __init__(
        self,
        vocab_size=3_0522,      # wordpiece vocabulary size
        max_pos=512,            # maximum number of positions (sequence length)
        enc_layers=6,           # encoder depth
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,           # decoder depth
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 710 |
def __magic_name__(min_val=10, max_val=1000, option=True):
    """Return ``min_val`` when ``option`` is truthy, otherwise ``max_val``.

    Fix: the previous revision declared all three parameters under the same
    name (a SyntaxError) while the body referenced ``min_val``/``max_val``/
    ``option``; the intended distinct names are restored.

    Raises:
        AssertionError: if the arguments are not (int, int, bool).
        ValueError: if ``min_val`` is greater than ``max_val``.
    """
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val
def __magic_name__(number_1, number_2):
    """Return the truncated (toward zero) midpoint of two numbers.

    Fix: the previous revision declared both parameters as ``lowerCAmelCase_``
    (a SyntaxError) and summed one undefined name with itself; two distinct
    parameters are restored.
    """
    return int((number_1 + number_2) / 2)
def __magic_name__(lower, higher, to_guess):
    """Binary-search ``to_guess`` strictly inside ``(lower, higher)``, printing each guess.

    Fixes: the previous revision declared all three parameters under the same
    name (a SyntaxError) and called an undefined helper ``get_avg``; the
    midpoint is now computed inline.

    Raises:
        AssertionError: if any argument is not an int.
        ValueError: if ``lower > higher`` or ``to_guess`` lies outside the range.
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")

    def answer(number) -> str:
        # Oracle: compare the current guess against the hidden value.
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        # Inline midpoint (truncating average) of the current bounds.
        number = int((last_lowest + last_highest) / 2)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(F"""guess the number : {last_numbers[-1]}""")
    print(F"""details : {last_numbers!s}""")
def __magic_name__ ( ):
    '''Interactive entry point: prompt for the bounds and the hidden value, then run the guesser.'''
    # NOTE(review): the three values below are bound to throwaway locals, and the call
    # targets `guess_the_number` / `lowerCAmelCase_` / `main` are undefined in this
    # module — presumably mangled by an automated rename; confirm against upstream.
    lowerCamelCase_ : Optional[int] = int(input("Enter lower value : ").strip())
    lowerCamelCase_ : List[str] = int(input("Enter high value : ").strip())
    lowerCamelCase_ : List[str] = int(input("Enter value to guess : ").strip())
    guess_the_number(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
if __name__ == "__main__":
    main()
| 73 | 0 |
# Lazy-import shim for the Jukebox model: symbols are only resolved on first access,
# so importing the package does not pull in torch unless it is actually needed.
# NOTE(review): the repeated `__magic_name__ = ...` rebindings below overwrite the
# import-structure dict with a plain list, and the final `_LazyModule(...)` call
# references an undefined name `_import_structure` — presumably these were distinct
# names before an automated rename; confirm against the upstream __init__.py.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {
    '''configuration_jukebox''': [
        '''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''JukeboxConfig''',
        '''JukeboxPriorConfig''',
        '''JukeboxVQVAEConfig''',
    ],
    '''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-backed modeling symbols, only registered when torch is installed.
    __magic_name__ = [
        '''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''JukeboxModel''',
        '''JukeboxPreTrainedModel''',
        '''JukeboxVQVAE''',
        '''JukeboxPrior''',
    ]
if TYPE_CHECKING:
    # Static type-checkers see the real imports; at runtime the lazy module is used.
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys
    __magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 711 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration for the CvT (Convolutional vision Transformer) model.

    Per-stage settings are 3-element lists (one entry per stage).

    Fixes over the previous revision: every ``__init__`` parameter was declared
    under the same name ``a_`` (a SyntaxError) and values were bound to
    throwaway locals instead of being stored on ``self``.
    """

    model_type = '''cvt'''

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],          # only the last stage uses a class token
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],  # depthwise-conv + batch-norm projection
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 73 | 0 |
# Lazy-import shim for the OPT model: heavy framework backends (torch / tf / flax)
# are only imported when their symbols are actually accessed.
# NOTE(review): the repeated `__magic_name__ = ...` rebindings below clobber the
# import-structure dict, and the final `_LazyModule(...)` call references an
# undefined name `_import_structure` — presumably distinct names before an
# automated rename; confirm against the upstream __init__.py.
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)
__magic_name__ = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch-backed model classes.
    __magic_name__ = [
        'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'OPTForCausalLM',
        'OPTModel',
        'OPTPreTrainedModel',
        'OPTForSequenceClassification',
        'OPTForQuestionAnswering',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow-backed model classes.
    __magic_name__ = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Flax-backed model classes.
    __magic_name__ = [
        'FlaxOPTForCausalLM',
        'FlaxOPTModel',
        'FlaxOPTPreTrainedModel',
    ]
if TYPE_CHECKING:
    # Static type-checkers see the real imports; at runtime the lazy module is used.
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
    import sys
    __magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 712 |
# Lazy-import shim for LayoutXLM: the sentencepiece-based slow tokenizer and the
# Rust-backed fast tokenizer are only imported when their dependencies exist.
# NOTE(review): the `__magic_name__ = ...` rebindings below clobber the
# import-structure dict, and the final `_LazyModule(...)` call references an
# undefined name `_import_structure` — presumably distinct names before an
# automated rename; confirm against the upstream __init__.py.
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)
__magic_name__ = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Slow (sentencepiece) tokenizer.
    __magic_name__ = ['''LayoutXLMTokenizer''']
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fast (Rust) tokenizer.
    __magic_name__ = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
    # Static type-checkers see the real imports; at runtime the lazy module is used.
    from .processing_layoutxlm import LayoutXLMProcessor
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
    import sys
    __magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 73 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
    """Fast (CPU, tiny-model) pipeline tests for AltDiffusionPipeline.

    NOTE(review): the three identical base names would raise "duplicate base
    class" at class-creation time — presumably they were the three pipeline
    tester mixins imported above; confirm against upstream. The name
    `_SCREAMING_SNAKE_CASE` used as an argument value throughout is likewise
    undefined in this module, and most results are bound to throwaway locals —
    verify against the original test file before running.
    """

    __UpperCAmelCase : str = AltDiffusionPipeline
    __UpperCAmelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS
    __UpperCAmelCase : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
    __UpperCAmelCase : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    __UpperCAmelCase : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS

    def _UpperCamelCase ( self ):
        # Build a tiny UNet/VAE/CLIP stack so the test runs in seconds on CPU.
        torch.manual_seed(0 )
        lowerCamelCase_ : int = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        lowerCamelCase_ : Tuple = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , )
        torch.manual_seed(0 )
        lowerCamelCase_ : List[str] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0 )
        lowerCamelCase_ : Tuple = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
        lowerCamelCase_ : Optional[Any] = CLIPTextModel(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ : Dict = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        lowerCamelCase_ : Union[str, Any] = 77
        lowerCamelCase_ : str = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def _UpperCamelCase ( self , a_ , a_=0 ):
        # Deterministic generator per device (mps needs the CPU-style seed path).
        if str(_SCREAMING_SNAKE_CASE ).startswith("mps" ):
            lowerCamelCase_ : Any = torch.manual_seed(_SCREAMING_SNAKE_CASE )
        else:
            lowerCamelCase_ : Dict = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ : str = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def _UpperCamelCase ( self ):
        super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )

    def _UpperCamelCase ( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )

    def _UpperCamelCase ( self ):
        # End-to-end run with the (deterministic) Roberta-series text encoder swapped in.
        lowerCamelCase_ : Tuple = "cpu"  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase_ : Optional[int] = self.get_dummy_components()
        torch.manual_seed(0 )
        lowerCamelCase_ : Tuple = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
        # TODO: remove after fixing the non-deterministic text encoder
        lowerCamelCase_ : Tuple = RobertaSeriesModelWithTransformation(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ : int = text_encoder
        lowerCamelCase_ : List[Any] = AltDiffusionPipeline(**_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ : Optional[Any] = alt_pipe.to(_SCREAMING_SNAKE_CASE )
        alt_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ : List[Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ : Optional[int] = "A photo of an astronaut"
        lowerCamelCase_ : str = alt_pipe(**_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ : Any = output.images
        lowerCamelCase_ : List[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        # Reference pixel slice recorded from a known-good run.
        lowerCamelCase_ : List[str] = np.array(
            [0.5_74_81_62, 0.60_44_71_45, 0.48_82_12_17, 0.50_10_06_36, 0.5_43_11_85, 0.45_76_36_83, 0.49_65_76_96, 0.48_13_27_33, 0.47_57_30_93] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def _UpperCamelCase ( self ):
        # Same end-to-end run with the PNDM scheduler instead of DDIM.
        lowerCamelCase_ : Optional[Any] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase_ : str = self.get_dummy_components()
        lowerCamelCase_ : Optional[Any] = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
        torch.manual_seed(0 )
        lowerCamelCase_ : List[Any] = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
        # TODO: remove after fixing the non-deterministic text encoder
        lowerCamelCase_ : Any = RobertaSeriesModelWithTransformation(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ : Any = text_encoder
        lowerCamelCase_ : List[Any] = AltDiffusionPipeline(**_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ : Union[str, Any] = alt_pipe.to(_SCREAMING_SNAKE_CASE )
        alt_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ : List[str] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ : List[str] = alt_pipe(**_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ : List[str] = output.images
        lowerCamelCase_ : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowerCamelCase_ : Optional[Any] = np.array(
            [0.51_60_50_93, 0.5_70_72_41, 0.47_36_55_07, 0.50_57_88_86, 0.5_63_38_77, 0.4_64_25_03, 0.5_18_20_81, 0.48_76_34_84, 0.49_08_42_37] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow GPU integration tests: run the real BAAI/AltDiffusion checkpoint and
    compare pixel slices against recorded reference values.

    NOTE(review): `_SCREAMING_SNAKE_CASE` (used as an argument value below) is
    undefined in this module and most results are bound to throwaway locals —
    presumably mangled by an automated rename; confirm against upstream.
    """

    def _UpperCamelCase ( self ):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _UpperCamelCase ( self ):
        # Full 20-step run with the default scheduler.
        lowerCamelCase_ : List[Any] = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , safety_checker=_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ : Optional[Any] = alt_pipe.to(_SCREAMING_SNAKE_CASE )
        alt_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ : Optional[int] = "A painting of a squirrel eating a burger"
        lowerCamelCase_ : Dict = torch.manual_seed(0 )
        lowerCamelCase_ : Any = alt_pipe([prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=20 , output_type="np" )
        lowerCamelCase_ : Union[str, Any] = output.images
        lowerCamelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        # Reference pixel slice recorded from a known-good run.
        lowerCamelCase_ : Any = np.array([0.10_10, 0.08_00, 0.07_94, 0.08_85, 0.08_43, 0.07_62, 0.07_69, 0.07_29, 0.05_86] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def _UpperCamelCase ( self ):
        # Two-step run with the DDIM scheduler swapped in.
        lowerCamelCase_ : Tuple = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" , subfolder="scheduler" )
        lowerCamelCase_ : Optional[int] = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ : Any = alt_pipe.to(_SCREAMING_SNAKE_CASE )
        alt_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ : int = "A painting of a squirrel eating a burger"
        lowerCamelCase_ : Dict = torch.manual_seed(0 )
        lowerCamelCase_ : Optional[Any] = alt_pipe([prompt] , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="numpy" )
        lowerCamelCase_ : Union[str, Any] = output.images
        lowerCamelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        lowerCamelCase_ : int = np.array([0.40_19, 0.40_52, 0.38_10, 0.41_19, 0.39_16, 0.39_82, 0.46_51, 0.41_95, 0.53_23] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 713 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Processor combining an Encodec feature extractor (audio) and a T5 tokenizer (text).

    Fixes over the previous revision: several methods declared duplicate ``a_``
    parameter names (a SyntaxError), attribute writes landed in throwaway
    locals, and results were never stored back into the returned dict; the
    original parameter names and assignments are restored.
    """

    feature_extractor_class = '''EncodecFeatureExtractor'''
    tokenizer_class = ('''T5Tokenizer''', '''T5TokenizerFast''')

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Default processor used inside the `as_target_processor` context.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        """Forward decoder prompt construction to the tokenizer."""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Route ``text`` to the tokenizer and ``audio`` to the feature extractor.

        Returns the tokenizer output, the feature-extractor output, or a merged
        dict when both inputs are given.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # Positional style: first positional argument is the audio.
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            # Merge the audio features (and their padding mask) into the text encoding.
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Decode generated audio when ``audio`` is given, else defer to the tokenizer."""
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward single-sequence decoding to the tokenizer."""
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None):
        """Strip padding from generated audio using the padding mask; return a list of per-item arrays."""
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values)
        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)
        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
| 73 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
_snake_case = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_snake_case = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
_snake_case = {
'''unc-nlp/lxmert-base-uncased''': 5_1_2,
}
_snake_case = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
    """Fast (Rust-backed) WordPiece tokenizer for LXMERT.

    Fixes over the previous revision: ``__init__`` declared all parameters
    under the same name ``a_`` (a SyntaxError), the class attributes all
    shared one name, and normalizer options were bound to throwaway locals
    instead of being written back into the normalizer state.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer if its saved options disagree with the
        # options requested here (e.g. a checkpoint saved with different casing).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for sequence A (incl. [CLS]/[SEP]), 1 for sequence B."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 714 |
def __magic_name__(number, digit_amount):
    """Isolate the fractional part of ``number``.

    The previous revision declared both parameters with the same name
    (``lowerCAmelCase_``), which is a SyntaxError in Python.

    Args:
        number: value whose fractional part is wanted.
        digit_amount: decimal digits to round the result to; 0 (or negative)
            returns the raw, unrounded fractional part.

    Returns:
        The fractional part; its sign follows ``number`` because ``int()``
        truncates toward zero (e.g. -14.123 -> -0.123).
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
    # Demo of the fractional-part helper defined above. The previous revision
    # called the non-existent name ``decimal_isolate`` (NameError at runtime);
    # the helper in this module is ``__magic_name__``.
    print(__magic_name__(1.53, 0))
    print(__magic_name__(35.345, 1))
    print(__magic_name__(35.345, 2))
    print(__magic_name__(35.345, 3))
    print(__magic_name__(-14.789, 3))
    print(__magic_name__(0, 2))
    print(__magic_name__(-14.123, 1))
    print(__magic_name__(-14.123, 2))
    print(__magic_name__(-14.123, 3))
| 73 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase__ ( lowercase__ ):
    """Unconditional image generation pipeline using the Karras et al. (2022)
    stochastic sampler (eq. 213 adjustments for the model in/outputs).

    Fixes over the previous revision: duplicated ``a_`` parameter names in
    ``__call__`` (SyntaxError) and the undefined ``__lowercase`` references
    (NameErrors) replaced by the actual local variables.
    """

    # Registered-module type hints (populated via ``register_modules``).
    __UpperCAmelCase : UNetaDModel
    __UpperCAmelCase : KarrasVeScheduler

    def __init__( self , unet , scheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 50 , generator = None , output_type = "pil" , return_dict = True , **kwargs , ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape , generator=generator , device=self.device ) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample , sigma , generator=generator )
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output , sigma_hat , sigma_prev , sample_hat )
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
                step_output = self.scheduler.step_correct(
                    model_output , sigma_hat , sigma_prev , sample_hat , step_output.prev_sample , step_output["derivative"] , )
            sample = step_output.prev_sample
        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        sample = (sample / 2 + 0.5).clamp(0 , 1 )
        image = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 715 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
    """Configuration holder for the LayoutLMv3 image-processor tests.

    Fixes over the previous revision: every ``__init__`` parameter was named
    ``a_`` (duplicate argument names are a SyntaxError) and the attribute
    assignments targeted throwaway names.
    """

    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        # Default target size when the caller does not provide one.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def _UpperCamelCase ( self ):
        """Return the kwargs dict used to build the image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase__ ( ImageProcessingSavingTestMixin, unittest.TestCase ):
    """Tests for the LayoutLMv3 image processor: config round-trips, PIL /
    numpy / torch input handling, and Tesseract OCR output.

    Fixes over the previous revision: undefined ``a_`` references replaced by
    the intended locals/constants, and the undefined base ``__lowerCamelCase``
    replaced by ``ImageProcessingSavingTestMixin`` (imported above).

    NOTE(review): all test methods share the name ``_UpperCamelCase``, so only
    the last definition is bound on the class — the distinct intended names
    should be restored for the earlier tests to run.
    """

    # Class under test; None (tests skipped) when pytesseract is unavailable.
    __UpperCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def _UpperCamelCase ( self ):
        # NOTE(review): ``LayoutLMvaImageProcessingTester`` is not defined in
        # this file (the tester class above was renamed) — confirm the symbol.
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )

    @property
    def _UpperCamelCase ( self ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def _UpperCamelCase ( self ):
        # The processor exposes the expected config attributes.
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "apply_ocr" ) )

    def _UpperCamelCase ( self ):
        # from_dict honours both the stored size and an explicit override.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )

    def _UpperCamelCase ( self ):
        pass

    def _UpperCamelCase ( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors="pt" )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # OCR is on by default, so words/boxes come back as Python lists.
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def _UpperCamelCase ( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def _UpperCamelCase ( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def _UpperCamelCase ( self ):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
        image = Image.open(ds[0]["file"] ).convert("RGB" )
        encoding = image_processing(image , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , expected_words )
        self.assertListEqual(encoding.boxes , expected_boxes )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 73 | 0 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
    """Configuration holder for the DPT image-processor tests.

    Fixes over the previous revision: every ``__init__`` parameter was named
    ``a_`` (duplicate argument names are a SyntaxError) and the attribute
    assignments targeted throwaway names.
    """

    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        # NOTE: list defaults are never mutated here, so sharing them is safe.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def _UpperCamelCase ( self ):
        """Return the kwargs dict used to build the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class lowerCAmelCase__ ( ImageProcessingSavingTestMixin, unittest.TestCase ):
    """Tests for the DPT image processor: config attributes, from_dict
    round-trips, and PIL / numpy / torch input handling.

    Fixes over the previous revision: undefined ``A_`` references replaced by
    the intended locals/constants, and the undefined base ``__snake_case``
    replaced by ``ImageProcessingSavingTestMixin`` (imported above).

    NOTE(review): all test methods share the name ``_UpperCamelCase``, so only
    the last definition is bound on the class — the distinct intended names
    should be restored for the earlier tests to run.
    """

    # Class under test; None (tests skipped) when vision deps are unavailable.
    __UpperCAmelCase : Optional[Any] = DPTImageProcessor if is_vision_available() else None

    def _UpperCamelCase ( self ):
        # NOTE(review): ``DPTImageProcessingTester`` is not defined in this
        # file (the tester class above was renamed) — confirm the symbol.
        self.image_processor_tester = DPTImageProcessingTester(self )

    @property
    def _UpperCamelCase ( self ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def _UpperCamelCase ( self ):
        # The processor exposes the expected config attributes.
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )

    def _UpperCamelCase ( self ):
        # from_dict honours both the stored size and an explicit override.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )

    def _UpperCamelCase ( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def _UpperCamelCase ( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def _UpperCamelCase ( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
| 716 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
# NOTE(review): the module-level name above is immediately rebound below, so
# the logger object is discarded. If logging is intended in this section, the
# logger needs a distinct name — TODO confirm before relying on it.
# Map of LUKE checkpoint names to their hosted config files.
__magic_name__ = {
    '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
    '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class lowerCAmelCase__ ( PretrainedConfig ):
    """Configuration class for LUKE models (word + entity representations).

    Fixes over the previous revision: every ``__init__`` parameter was named
    ``a_`` (duplicate argument names are a SyntaxError), attribute assignments
    targeted throwaway names, and the base class was the undefined name
    ``__lowerCamelCase`` — ``PretrainedConfig`` (imported above) is the
    intended base.
    """

    # model_type identifier used by the auto classes.
    __UpperCAmelCase : List[Any] = '''luke'''

    def __init__(
        self,
        vocab_size=5_0267,
        entity_vocab_size=50_0000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Store the hyperparameters; special-token ids go to the base class."""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        # Entity embeddings may be smaller than hidden_size and get projected up.
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 73 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( ProcessorMixin ):
    """Processor bundling the BridgeTower image processor and a Roberta
    tokenizer into a single callable.

    Fixes over the previous revision: duplicated ``a_`` parameter names in
    ``__init__``/``__call__`` (SyntaxError), undefined references replaced by
    the intended parameters, and the undefined base ``UpperCamelCase__``
    replaced by ``ProcessorMixin`` (imported above).

    NOTE(review): the batch_decode/decode helpers and the model_input_names
    property all share the name ``_UpperCamelCase``; only the last definition
    survives — the distinct intended names should be restored.
    """

    __UpperCAmelCase : Tuple = ["""image_processor""", """tokenizer"""]
    __UpperCAmelCase : List[Any] = """BridgeTowerImageProcessor"""
    __UpperCAmelCase : Optional[Any] = ("""RobertaTokenizer""", """RobertaTokenizerFast""")

    def __init__( self , image_processor , tokenizer ):
        super().__init__(image_processor , tokenizer )

    def __call__( self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        # Tokenize the text with the full set of user-controlled options.
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        # NOTE(review): do_normalize/do_center_crop forced on here, matching
        # upstream BridgeTower behavior — TODO confirm these defaults.
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs )
        encoding.update(encoding_image_processor )
        return encoding

    def _UpperCamelCase ( self , *args , **kwargs ):
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args , **kwargs )

    def _UpperCamelCase ( self , *args , **kwargs ):
        # Forward to the tokenizer's decode.
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def _UpperCamelCase ( self ):
        # Union of tokenizer and image-processor input names, de-duplicated
        # while preserving order.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__magic_name__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class lowerCAmelCase__ ( datasets.BuilderConfig ):
    """Builder config for building a dataset from a Spark DataFrame.

    ``features`` may stay None; the builder below passes it straight into
    ``datasets.DatasetInfo`` and the Arrow writers, which accept None
    (schema then comes from the written batches).
    """
    __UpperCAmelCase : Optional[datasets.Features] = None
def __magic_name__(df, partition_order):
    """Build a generator function over a Spark DataFrame's rows.

    The previous revision declared both parameters with the same name
    (``lowerCAmelCase_``), a SyntaxError, and assigned the inner locals to
    throwaway names that were then read under their real names.

    Args:
        df: the ``pyspark.sql.DataFrame`` to iterate.
        partition_order: iterable of partition ids giving the visit order.

    Returns:
        A zero-argument function that yields ``("{partition}_{row}", row_dict)``
        pairs, one partition at a time.
    """
    import pyspark

    def generate_fn():
        # Tag every row with its physical partition id so partitions can be
        # pulled selectively and in a caller-chosen order.
        df_with_partition_id = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"""part_id = {partition_id}""").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"""{partition_id}_{row_id}""", row.asDict()
                row_id += 1

    return generate_fn
class lowerCAmelCase__ ( _BaseExamplesIterable ):
    """Examples iterable backed by a Spark DataFrame; shards map to partitions.

    Fixes over the previous revision: duplicated ``a_`` parameter names
    (SyntaxError) and attribute assignments that targeted throwaway names.

    NOTE(review): the shuffle/shard methods and the shard-count property all
    share the name ``_UpperCamelCase`` — only the last stays bound; the
    intended distinct names should be restored. ``_generate_iterable_examples``
    is presumably the module helper currently bound as ``__magic_name__`` —
    TODO confirm/restore that symbol.
    """

    def __init__( self , df , partition_order=None , ):
        self.df = df
        # Default order: partitions as-is, 0..n-1.
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
        self.generate_examples_fn = _generate_iterable_examples(self.df , self.partition_order )

    def __iter__( self ):
        yield from self.generate_examples_fn()

    def _UpperCamelCase ( self , generator ):
        """Return a copy that visits the partitions in a random order."""
        partition_order = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(partition_order )
        # type(self) keeps this correct regardless of how the class is bound
        # at module level.
        return type(self )(self.df , partition_order=partition_order )

    def _UpperCamelCase ( self , worker_id , num_workers ):
        """Return a copy restricted to this worker's share of the partitions."""
        partition_order = self.split_shard_indices_by_worker(worker_id , num_workers )
        return type(self )(self.df , partition_order=partition_order )

    @property
    def _UpperCamelCase ( self ):
        # One shard per partition in the visit order.
        return len(self.partition_order )
class lowerCAmelCase__ ( datasets.DatasetBuilder ):
"""simple docstring"""
__UpperCAmelCase : Any = SparkConfig
def __init__( self , a_ , a_ = None , a_ = None , **a_ , ):
import pyspark
lowerCamelCase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowerCamelCase_ : Optional[Any] = df
lowerCamelCase_ : List[Any] = working_dir
super().__init__(
cache_dir=a_ , config_name=str(self.df.semanticHash() ) , **a_ , )
def _UpperCamelCase ( self ):
# Returns the path of the created file.
def create_cache_and_write_probe(a_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=a_ )
lowerCamelCase_ : Optional[Any] = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(a_ , "a" )
return [probe_file]
if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowerCamelCase_ : List[str] = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(a_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
def _UpperCamelCase ( self ):
return datasets.DatasetInfo(features=self.config.features )
def _UpperCamelCase ( self , a_ ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _UpperCamelCase ( self , a_ ):
import pyspark
def get_arrow_batch_size(a_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
lowerCamelCase_ : str = self.df.count()
lowerCamelCase_ : List[Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowerCamelCase_ : Any = (
self.df.limit(a_ )
.repartition(1 )
.mapInArrow(a_ , "batch_bytes: long" )
.agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowerCamelCase_ : int = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowerCamelCase_ : Union[str, Any] = min(a_ , int(approx_total_size / max_shard_size ) )
lowerCamelCase_ : int = self.df.repartition(a_ )
def _UpperCamelCase ( self , a_ , a_ , a_ , ):
import pyspark
lowerCamelCase_ : str = ParquetWriter if file_format == "parquet" else ArrowWriter
lowerCamelCase_ : int = os.path.join(self._working_dir , os.path.basename(a_ ) ) if self._working_dir else fpath
lowerCamelCase_ : Optional[Any] = file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowerCamelCase_ : int = self.config.features
lowerCamelCase_ : Any = self._writer_batch_size
lowerCamelCase_ : Tuple = self._fs.storage_options
def write_arrow(a_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowerCamelCase_ : List[Any] = pyspark.TaskContext().taskAttemptId()
lowerCamelCase_ : Optional[int] = next(a_ , a_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
lowerCamelCase_ : List[Any] = 0
lowerCamelCase_ : Optional[int] = writer_class(
features=a_ , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
lowerCamelCase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(a_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowerCamelCase_ ,lowerCamelCase_ : List[str] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
lowerCamelCase_ : List[str] = writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , F"""{shard_id:05d}""" ).replace("TTTTT" , F"""{task_id:05d}""" ) , writer_batch_size=a_ , storage_options=a_ , embed_local_files=a_ , )
lowerCamelCase_ : Optional[int] = pa.Table.from_batches([batch] )
writer.write_table(a_ )
if writer._num_bytes > 0:
lowerCamelCase_ ,lowerCamelCase_ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(a_ ) ):
lowerCamelCase_ : str = os.path.join(os.path.dirname(a_ ) , os.path.basename(a_ ) )
shutil.move(a_ , a_ )
lowerCamelCase_ : int = (
self.df.mapInArrow(a_ , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _UpperCamelCase(
    self,
    split_generator,
    file_format="arrow",
    max_shard_size=None,
    num_proc=None,
    **kwargs,
):
    """Materialize one split to shard files via Spark, then rename shards into place.

    The duplicated placeholder parameter names of the original signature were a
    SyntaxError; distinct names are restored from how the body uses them.

    Args:
        split_generator: generator whose ``.name`` labels the split; its
            ``split_info`` receives the final example/byte/shard counts.
        file_format: extension of the shard files (default "arrow").
        max_shard_size: per-shard byte budget; falls back to ``MAX_SHARD_SIZE``.
        num_proc: accepted for interface compatibility; unused here
            (parallelism comes from Spark tasks).
    """
    self._validate_cache_dir()

    # Resolve the shard-size budget (in bytes) before repartitioning the DataFrame.
    max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
    self._repartition_df_if_needed(max_shard_size)

    is_local = not is_remote_filesystem(self._fs)
    path_join = os.path.join if is_local else posixpath.join

    # Temporary shard-name pattern: task id (TTTTT), shard id (SSSSS), total (NNNNN).
    SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
    fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
    fpath = path_join(self._output_dir, fname)

    total_num_examples = 0
    total_num_bytes = 0
    total_shards = 0
    task_id_and_num_shards = []
    all_shard_lengths = []

    for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
        (num_examples, num_bytes, num_shards, shard_lengths) = content
        if num_bytes > 0:
            total_num_examples += num_examples
            total_num_bytes += num_bytes
            total_shards += num_shards
            task_id_and_num_shards.append((task_id, num_shards))
            all_shard_lengths.extend(shard_lengths)

    # NOTE(review): the original assigned these totals to bare locals; upstream
    # they are recorded on the split info — confirm against the base builder.
    split_generator.split_info.num_examples = total_num_examples
    split_generator.split_info.num_bytes = total_num_bytes

    # should rename everything at the end
    logger.debug(f"""Renaming {total_shards} shards.""")
    if total_shards > 1:
        split_generator.split_info.shard_lengths = all_shard_lengths

        # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
        # pickling error due to pickling the SparkContext.
        fs = self._fs

        # use the -SSSSS-of-NNNNN pattern
        def _rename_shard(task_id, shard_id, global_shard_id):
            rename(
                fs,
                fpath.replace("SSSSS", f"""{shard_id:05d}""").replace("TTTTT", f"""{task_id:05d}"""),
                fpath.replace("TTTTT-SSSSS", f"""{global_shard_id:05d}""").replace("NNNNN", f"""{total_shards:05d}"""),
            )

        args = []
        global_shard_id = 0
        for i in range(len(task_id_and_num_shards)):
            task_id, num_shards = task_id_and_num_shards[i]
            for shard_id in range(num_shards):
                args.append([task_id, shard_id, global_shard_id])
                global_shard_id += 1
        self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
    else:
        # don't use any pattern
        shard_id = 0
        task_id = task_id_and_num_shards[0][0]
        self._rename(
            fpath.replace("SSSSS", f"""{shard_id:05d}""").replace("TTTTT", f"""{task_id:05d}"""),
            fpath.replace(SUFFIX, ""),
        )
def _UpperCamelCase(self, a_):
    """Return a ``SparkExamplesIterable`` over this builder's Spark DataFrame.

    The split argument (``a_``) is accepted for interface compatibility but the
    iterable is always built from ``self.df``.
    """
    examples_iterable = SparkExamplesIterable(self.df)
    return examples_iterable
| 73 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both statements bind the same placeholder name `__magic_name__`,
# so the logger created on the first line is immediately shadowed by the
# archive-map dict; upstream these are two distinct module constants.
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
    '''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class NllbMoeConfig(PretrainedConfig):
    """Configuration for an NLLB-MoE encoder-decoder model.

    Fixes over the original: the base class ``UpperCAmelCase__`` and the
    ``lowerCamelCase__`` kwargs passed to ``super().__init__`` were undefined
    names, every class attribute shared one placeholder name (so only the last
    survived), and the duplicated ``a_`` parameters were a SyntaxError.
    Parameter names are restored from the right-hand sides of the body's
    assignments, in signature order matching the original defaults.
    """

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=12_8112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        # The original assigned `encoder_layers` a second time here; upstream
        # that second binding is `num_hidden_layers`.
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 718 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph,
    v,
    visited_forward,
    visited_backward,
    cst_fwd,
    cst_bwd,
    queue,
    parent,
    shortest_distance,
):
    """Relax all edges leaving ``v`` for one frontier of bidirectional Dijkstra.

    Updates ``cst_fwd``, ``parent`` and ``queue`` in place and returns the
    (possibly improved) best known source-destination distance.

    Renamed from ``__magic_name__``: the function below calls it as
    ``pass_and_relaxation``, and the duplicated placeholder parameter names of
    the original signature were a SyntaxError.
    """
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # If the other frontier already settled `nxt`, the searches meet here.
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source, destination, graph_forward, graph_backward):
    """Bidirectional Dijkstra shortest path between ``source`` and ``destination``.

    Runs two Dijkstra frontiers in lockstep — one on the forward graph, one on
    the reversed graph — and stops once the frontiers provably meet.

    Returns:
        The shortest distance, or -1 when ``destination`` is unreachable.

    Renamed from ``__magic_name__`` (which collided with the helper above);
    duplicated placeholder parameters were a SyntaxError.
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # Frontiers met: no shorter connecting path can exist.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Forward adjacency lists: node -> [[neighbour, edge_weight], ...].
# In the original, both graphs were bound to the same placeholder name
# `__magic_name__`, so the second assignment clobbered the first and the
# forward graph was unreachable.
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
# Backward (reversed-edge) adjacency lists for the backward frontier.
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 73 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output of the text-to-video pipelines.

    Fixes over the original: the base class ``__a`` was an undefined name
    (``BaseOutput`` is what this module imports for the purpose), and the
    double-underscore placeholder field name would have been name-mangled.

    Attributes:
        frames: the produced video frames, as a list of ``np.ndarray`` or a
            ``torch.FloatTensor``.
    """

    frames: Union[List[np.ndarray], torch.FloatTensor]
# Register the real pipeline implementations only when both PyTorch and
# transformers are installed; otherwise fall back to the dummy placeholder
# objects so that `from ... import *` still resolves the public names.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 719 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both statements bind the same placeholder name `__magic_name__`;
# the logger is immediately shadowed by the archive-map dict. Upstream these
# are two distinct module constants.
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class CTRLConfig(PretrainedConfig):
    """Configuration for a CTRL language model.

    Fixes over the original: the base class ``__lowerCamelCase`` was an
    undefined name (this module imports ``PretrainedConfig``), the three class
    attributes all shared one placeholder name (so only the last survived),
    and the duplicated ``a_`` parameters were a SyntaxError. Parameter names
    are restored from the right-hand sides of the body's assignments.
    """

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=24_6534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 73 | 0 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    """Builds tiny FocalNet configs/inputs for the unit-test suites below.

    Restored from placeholder names: the test class at the bottom of the file
    instantiates ``FocalNetModelTester(self)`` and calls
    ``prepare_config_and_inputs`` / ``create_and_check_*`` by name, while the
    obfuscated version named the class ``lowerCAmelCase__`` and every method
    ``_UpperCamelCase`` (so only the last method survived). The duplicated
    ``a_`` constructor parameters were a SyntaxError.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        # NOTE(review): mutable list defaults are shared across instances; kept
        # for interface compatibility with the upstream test harness.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,  # NOTE(review): kwarg spelled "path_norm" in the original — kept as-is
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model tests for FocalNet.

    Restored from placeholder names: every method was called ``_UpperCamelCase``
    (so earlier definitions were shadowed and unittest would discover no
    ``test_*`` methods at all), the mixin bases were the undefined ``__lowercase``
    name, and ``__a`` stood in for many distinct local references.
    """

    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        # The backbone (last entry) exposes no input/output embeddings.
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim]
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim]
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the pretrained microsoft/focalnet-tiny checkpoint."""

    @cached_property
    def default_image_processor(self):
        # Restored name: the test below reads `self.default_image_processor`,
        # but the property was defined under the placeholder name.
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        # Bug fix: the original used assertTrue(value, 281), where 281 was
        # silently treated as the failure *message* — assertEqual actually checks.
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    """Backbone-specific test-suite wiring for FocalNet.

    Restored from placeholder names; the tester instance is assigned to
    ``self.model_tester`` (the original bound it to a throwaway local, leaving
    the mixin without its tester).
    """

    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 720 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
# Restored names: `logger` is used throughout main() below, and the generator
# on the last line consumes MODEL_CONFIG_CLASSES — neither was ever bound in
# the original (all three statements were assigned to one placeholder name).
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.

    Field names restored from the ``self.<name>`` references in ``__post_init__``
    and from ``HfArgumentParser((ModelArguments, ...))`` in main(); the original
    bound every field to one mangled placeholder name and used the undefined
    name ``__lowerCamelCase`` as default value.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        # `--config_overrides` rewrites a freshly-initialised config, so it is
        # incompatible with loading an existing config or checkpoint.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Field names restored from the ``self.train_file`` / ``self.validation_file``
    references in ``__post_init__``; the original bound every field to one
    mangled placeholder name and used the undefined ``__lowerCamelCase`` as
    default value, and named ``__post_init__`` with a placeholder so it never ran.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        # Validate the data-file extensions early, before any loading happens.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    """Attach whole-word-masking reference data to ``dataset``.

    Reads one JSON object per non-empty line of ``ref_file`` and stores them in
    a new "chinese_ref" column of a rebuilt ``Dataset``. Renamed from
    ``__magic_name__``, which was immediately shadowed by the following
    ``main`` definition; the duplicated placeholder parameters were a
    SyntaxError, and the comprehension parsed an undefined name instead of
    each line.
    """
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
    """Pretrain/fine-tune a masked LM with whole-word masking (optionally with chinese refs).

    Parses ModelArguments/DataTrainingArguments/TrainingArguments, loads the datasets,
    model and tokenizer, trains and/or evaluates, and returns the eval metrics dict.

    The obfuscated original assigned every value to a throwaway name while later code
    referenced the real names (``parser``, ``datasets``, ``config``, ...), which raised
    NameError on the first use; the proper bindings are restored here. Also fixes the
    ``training_args.fpaa`` typo (``fp16``) and makes ``tokenize_function`` actually use
    its filtered lines.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: either a hub dataset name or user-provided CSV/JSON/TXT files.
    # In distributed training, load_dataset guarantees only one local process downloads.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            # Carve a validation split out of the train split.
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)

    # Load pretrained model and tokenizer. In distributed training, .from_pretrained
    # guarantees only one local process downloads model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets. First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines; assign back so the filtered text is what gets tokenized.
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator: takes care of randomly masking whole words.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity
        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")
    return results
def __magic_name__ ( lowerCAmelCase_):
    """TPU multiprocessing entry point: each spawned worker ignores its index and runs main().

    NOTE(review): launchers such as xla_spawn look this function up as `_mp_fn`;
    upstream names it that way — confirm before relying on the obfuscated name.
    """
    main()
if __name__ == "__main__":
    main()
| 73 | 0 |
import collections
import os
import re
from pathlib import Path
__magic_name__ = "src/transformers"
# Matches is_xxx_available()
__magic_name__ = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
__magic_name__ = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__magic_name__ = re.compile(R'''\s+\"\S*\":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
__magic_name__ = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
__magic_name__ = re.compile(R'''^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__magic_name__ = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
__magic_name__ = re.compile(R'''^\s+\"([^\"]+)\",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
__magic_name__ = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
__magic_name__ = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
__magic_name__ = re.compile(R'''^\s*try:''')
# Catches a line with else:
__magic_name__ = re.compile(R'''^\s*else:''')
def find_backend(line):
    """Find one (or multiple) backend in a code line of an init.

    Returns the backend names sorted and joined by "_and_", or None when the
    line is not an `if not is_xxx_available()` guard. (The obfuscated original
    named the parameter one thing and referenced another, raising NameError;
    callers below already use the name `find_backend`.)
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Read an __init__.py and return its two halves of declared objects.

    Returns a pair ``(import_dict_objects, type_hint_objects)`` — each a dict
    mapping a backend name (or "none") to the list of object names declared in
    the ``_import_structure`` half and the ``TYPE_CHECKING`` half respectively —
    or None when the init is a traditional one without ``_import_structure``.
    (Restores the consistent local names the obfuscated original lost.)
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + "\""):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + "\""):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + "\""):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init parsed by ``parse_init``.

    Args:
        import_dict_objects: backend -> objects declared in ``_import_structure``.
        type_hint_objects: backend -> objects declared under ``TYPE_CHECKING``.

    Returns:
        A list of human-readable error strings; empty when the halves agree.
    """

    def find_duplicates(seq):
        # Names appearing more than once in one half.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        # Compare as sets so ordering differences are not reported, only membership.
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f" {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f" {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Walk the transformers tree and raise ValueError if any __init__.py's two
    halves (``_import_structure`` vs ``TYPE_CHECKING``) do not declare the same
    objects. Collects every failing init before raising.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    # Prefix the first error with the offending file for context.
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the list of transformers submodules (dotted names) found on disk
    under ``PATH_TO_TRANSFORMERS``, skipping private folders, empty leftover
    folders, and ``__init__.py`` files.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level .py files as submodules (folders handle the rest).
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules intentionally absent from the main init's `_import_structure`.
# (Restored name: `check_submodules` below reads this list as IGNORE_SUBMODULES.)
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Raise ValueError when a submodule on disk is not registered in the keys of
    the main init's ``_import_structure`` (and is not deliberately ignored).
    """
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
    # Run both consistency checks; each raises ValueError listing the problems found.
    check_all_inits()
    check_submodules()
| 721 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    """Mutable (replace-based) state of the Flax Karras VE scheduler.

    The obfuscated original bound all three fields to the same name and left the
    class unnamed, while the scheduler below calls ``KarrasVeSchedulerState.create()``
    and ``state.replace(num_inference_steps=..., schedule=..., timesteps=...)`` —
    the upstream field names are restored accordingly.
    """

    # setable values (filled in by the scheduler's set_timesteps)
    num_inference_steps: Optional[int] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)
    timesteps: Optional[jnp.ndarray] = None

    @classmethod
    def create(cls):
        """Return an empty state; fields stay None until `set_timesteps` fills them."""
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """Output of the Flax Karras VE scheduler's step methods.

    The scheduler below constructs this class with the keywords
    ``prev_sample``, ``derivative`` and ``state``; the obfuscated original
    clobbered all three fields under one name, so those names are restored.
    """

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Flax stochastic sampling scheduler from Karras et al. (variance-exploding).

    The obfuscated original repeated the parameter name ``a_`` in every method
    signature (a SyntaxError) and used the nonexistent ``jnp.floataa`` dtype;
    upstream names/dtypes are restored. ``register_to_config`` stores the
    ``__init__`` keyword arguments on ``self.config``, hence the empty body.
    """

    @property
    def has_state(self):
        # This scheduler carries its mutable values in an external state object.
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        """Return a fresh, empty scheduler state."""
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state, num_inference_steps, shape=()):
        """Fill the state with reversed timesteps and the geometric sigma schedule."""
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state, sample, sigma, key):
        """Explode the sample to a higher noise level sigma_hat; returns (sample_hat, sigma_hat)."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        """One Euler step of the probability-flow ODE from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True):
        """Second-order (Heun) correction using the average of the two derivatives."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        # Not supported for this scheduler (matches upstream behavior).
        raise NotImplementedError()
| 73 | 0 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    """Builds small MPNet configs/inputs and shape-checks each task head.

    The obfuscated original repeated the parameter name ``a_`` in ``__init__``
    (a SyntaxError), referenced the undefined name ``A__`` in every method, and
    clobbered all method names; the upstream names — which the test class below
    already calls (``create_and_check_mpnet_model`` etc.) — are restored.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        # Random ids/masks/labels sized by the tester's dimensions.
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Replicate inputs across the choice dimension: (batch, seq) -> (batch, choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite driver for MPNet.

    Restores the upstream base classes (the obfuscated original inherited from
    the undefined name ``__a``), the distinct class attributes (all clobbered
    under one name), and the ``test_*`` method names pytest discovers.
    """

    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    """Slow integration check against the pretrained microsoft/mpnet-base weights.

    Restores the variable names the obfuscated original referenced through the
    undefined ``A__``, and normalizes the underscore float literals.
    """

    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 700 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
    """Fast unit tests for StableDiffusionDiffEditPipeline: mask generation,
    DDIM / DPM-Solver latent inversion, and save/load of optional components,
    all on tiny randomly-initialised models.

    NOTE(review): throughout this class `a_` is referenced but never defined,
    several methods declare duplicate `a_` parameters (a SyntaxError), and the
    mangled assignments bind `lowerCamelCase_` while later lines read the
    original names (`pipe`, `mask`, `image`, ...) — confirm against the
    original diffusers test module when un-mangling.
    """

    # Pipeline under test and its parameter sets (height/width/image are
    # replaced by pre-computed image_latents for DiffEdit).
    __UpperCAmelCase : Any = StableDiffusionDiffEditPipeline
    __UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
    __UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
    __UpperCAmelCase : List[Any] = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    __UpperCAmelCase : List[str] = frozenset([] )

    def _UpperCamelCase ( self ):
        # Build tiny UNet / schedulers / VAE / CLIP components; torch.manual_seed
        # calls pin the random init so the recorded slices below are stable.
        torch.manual_seed(0 )
        lowerCamelCase_ : str = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a_ , )
        lowerCamelCase_ : str = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , )
        lowerCamelCase_ : Dict = DDIMInverseScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_zero=a_ , )
        torch.manual_seed(0 )
        lowerCamelCase_ : List[Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        lowerCamelCase_ : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        lowerCamelCase_ : Optional[Any] = CLIPTextModel(a_ )
        lowerCamelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        lowerCamelCase_ : Optional[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def _UpperCamelCase ( self , a_ , a_=0 ):
        # Inputs for the main __call__: a random mask plus pre-computed latents.
        # NOTE(review): both parameters were mangled to `a_` — originally
        # (device, seed); duplicate argument names are a SyntaxError as written.
        lowerCamelCase_ : str = floats_tensor((1, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
        lowerCamelCase_ : List[Any] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
        if str(a_ ).startswith("mps" ):
            lowerCamelCase_ : List[Any] = torch.manual_seed(a_ )
        else:
            lowerCamelCase_ : List[str] = torch.Generator(device=a_ ).manual_seed(a_ )
        lowerCamelCase_ : Tuple = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def _UpperCamelCase ( self , a_ , a_=0 ):
        # Inputs for generate_mask: a tiny RGB image plus source/target prompts.
        lowerCamelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
        lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCamelCase_ : Any = Image.fromarray(np.uinta(a_ ) ).convert("RGB" )
        if str(a_ ).startswith("mps" ):
            lowerCamelCase_ : Tuple = torch.manual_seed(a_ )
        else:
            lowerCamelCase_ : List[Any] = torch.Generator(device=a_ ).manual_seed(a_ )
        lowerCamelCase_ : int = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def _UpperCamelCase ( self , a_ , a_=0 ):
        # Inputs for invert(): image + prompt, decoding latents back to images.
        lowerCamelCase_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
        lowerCamelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCamelCase_ : Optional[int] = Image.fromarray(np.uinta(a_ ) ).convert("RGB" )
        if str(a_ ).startswith("mps" ):
            lowerCamelCase_ : Optional[int] = torch.manual_seed(a_ )
        else:
            lowerCamelCase_ : Tuple = torch.Generator(device=a_ ).manual_seed(a_ )
        lowerCamelCase_ : Union[str, Any] = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs

    def _UpperCamelCase ( self ):
        # Optional components set to None must survive a save/load round-trip,
        # and outputs before/after reloading must match within 1e-4.
        if not hasattr(self.pipeline_class , "_optional_components" ):
            return
        lowerCamelCase_ : List[Any] = self.get_dummy_components()
        lowerCamelCase_ : int = self.pipeline_class(**a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(a_ , a_ , a_ )
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
        lowerCamelCase_ : int = self.get_dummy_inputs(a_ )
        lowerCamelCase_ : int = pipe(**a_ )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(a_ )
            lowerCamelCase_ : Optional[int] = self.pipeline_class.from_pretrained(a_ )
            pipe_loaded.to(a_ )
            pipe_loaded.set_progress_bar_config(disable=a_ )
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(a_ , a_ ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
        lowerCamelCase_ : List[str] = self.get_dummy_inputs(a_ )
        lowerCamelCase_ : Optional[int] = pipe_loaded(**a_ )[0]
        lowerCamelCase_ : Optional[int] = np.abs(output - output_loaded ).max()
        self.assertLess(a_ , 1E-4 )

    def _UpperCamelCase ( self ):
        # generate_mask on the tiny models: expect an all-zero 16x16 mask.
        lowerCamelCase_ : Optional[int] = "cpu"
        lowerCamelCase_ : int = self.get_dummy_components()
        lowerCamelCase_ : List[Any] = self.pipeline_class(**a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : Any = self.get_dummy_mask_inputs(a_ )
        lowerCamelCase_ : int = pipe.generate_mask(**a_ )
        lowerCamelCase_ : List[Any] = mask[0, -3:, -3:]
        self.assertEqual(mask.shape , (1, 16, 16) )
        lowerCamelCase_ : List[str] = np.array([0] * 9 )
        lowerCamelCase_ : Optional[int] = np.abs(mask_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(a_ , 1E-3 )
        self.assertEqual(mask[0, -3, -4] , 0 )

    def _UpperCamelCase ( self ):
        # DDIM inversion must reproduce the recorded latent/image slice.
        lowerCamelCase_ : Optional[int] = "cpu"
        lowerCamelCase_ : Union[str, Any] = self.get_dummy_components()
        lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : Dict = self.get_dummy_inversion_inputs(a_ )
        lowerCamelCase_ : Dict = pipe.invert(**a_ ).images
        lowerCamelCase_ : str = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        lowerCamelCase_ : Dict = np.array(
            [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
        lowerCamelCase_ : Any = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(a_ , 1E-3 )

    def _UpperCamelCase ( self ):
        # Batched vs single-sample outputs must agree within 5e-3.
        super().test_inference_batch_single_identical(expected_max_diff=5E-3 )

    def _UpperCamelCase ( self ):
        # Same inversion check, but with DPM-Solver multistep forward + inverse
        # schedulers built from an explicit beta config.
        lowerCamelCase_ : List[Any] = "cpu"
        lowerCamelCase_ : int = self.get_dummy_components()
        lowerCamelCase_ : int = {"beta_start": 0.0_00_85, "beta_end": 0.0_12, "beta_schedule": "scaled_linear"}
        lowerCamelCase_ : Optional[Any] = DPMSolverMultistepScheduler(**a_ )
        lowerCamelCase_ : List[str] = DPMSolverMultistepInverseScheduler(**a_ )
        lowerCamelCase_ : Union[str, Any] = self.pipeline_class(**a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : int = self.get_dummy_inversion_inputs(a_ )
        lowerCamelCase_ : str = pipe.invert(**a_ ).images
        lowerCamelCase_ : int = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        lowerCamelCase_ : Union[str, Any] = np.array(
            [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
        lowerCamelCase_ : str = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(a_ , 1E-3 )
@require_torch_gpu
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
    """GPU integration tests: run DiffEdit end-to-end on the real
    stable-diffusion-2-1 checkpoint (fruit bowl -> bowl of pears) with DDIM and
    DPM-Solver schedulers, comparing against a reference image.

    NOTE(review): as in the fast tests, `a_`, `pipe`, `raw_image`, `mask_image`,
    `inv_latents`, `image` are read but the mangled assignments bind
    `lowerCamelCase_` — confirm against the original diffusers test module.
    """

    def _UpperCamelCase ( self ):
        # tearDown: free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def _UpperCamelCase ( cls ):
        # setUpClass: download the fruit-bowl test image once and resize to 768x768.
        lowerCamelCase_ : Dict = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
        lowerCamelCase_ : int = raw_image.convert("RGB" ).resize((768, 768) )
        lowerCamelCase_ : List[Any] = raw_image

    def _UpperCamelCase ( self ):
        # Full DiffEdit with DDIM schedulers: mask -> inversion -> inpainting,
        # compared against the reference "pears" image with a loose 0.5 tolerance.
        lowerCamelCase_ : Dict = torch.manual_seed(0 )
        lowerCamelCase_ : Tuple = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
        lowerCamelCase_ : str = DDIMScheduler.from_config(pipe.scheduler.config )
        lowerCamelCase_ : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : str = "a bowl of fruit"
        lowerCamelCase_ : Optional[int] = "a bowl of pears"
        lowerCamelCase_ : List[Any] = pipe.generate_mask(
            image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
        lowerCamelCase_ : str = pipe.invert(
            prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ ).latents
        lowerCamelCase_ : List[str] = pipe(
            prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
        lowerCamelCase_ : List[str] = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png" ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5E-1

    def _UpperCamelCase ( self ):
        # Same end-to-end flow with DPM-Solver multistep schedulers and 25 steps.
        lowerCamelCase_ : Optional[Any] = torch.manual_seed(0 )
        lowerCamelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1" , safety_checker=a_ , torch_dtype=torch.floataa )
        lowerCamelCase_ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        lowerCamelCase_ : str = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=a_ )
        lowerCamelCase_ : Any = "a bowl of fruit"
        lowerCamelCase_ : Dict = "a bowl of pears"
        lowerCamelCase_ : Optional[Any] = pipe.generate_mask(
            image=self.raw_image , source_prompt=a_ , target_prompt=a_ , generator=a_ , )
        lowerCamelCase_ : str = pipe.invert(
            prompt=a_ , image=self.raw_image , inpaint_strength=0.7 , generator=a_ , num_inference_steps=25 , ).latents
        lowerCamelCase_ : Any = pipe(
            prompt=a_ , mask_image=a_ , image_latents=a_ , generator=a_ , negative_prompt=a_ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
        lowerCamelCase_ : List[str] = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png" ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5E-1
| 73 | 0 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def __magic_name__ ( sequences , padding_value , padding_side , sequence_length ):
    """Pad (or truncate) a batch of variable-length sequences to a fixed length.

    Args:
        sequences: iterable of array-likes, one per example.
        padding_value: fill value. A tuple (e.g. ``(-1, -1)``) selects a
            3-D result ``(batch, sequence_length, 2)`` for pair-valued entries
            such as entity spans; a scalar gives a 2-D result.
        padding_side: ``"right"`` pads/truncates at the end; anything else
            pads at the start.
        sequence_length: target length of every padded sequence.

    Returns:
        Nested Python lists of shape ``(len(sequences), sequence_length[, 2])``.
    """
    # Fixes two defects in the mangled original: duplicate parameter names
    # (a SyntaxError), and truncated slices assigned to throwaway locals
    # instead of being written into `out_tensor` (the result was all padding).
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        # Never copy more than `sequence_length` elements.
        length = min(len(tensor), sequence_length)
        if padding_side == "right":
            out_tensor[i, :length] = tensor[:length]
        else:
            out_tensor[i, sequence_length - length :] = tensor[:length]
    return out_tensor.tolist()
def __magic_name__ ( lowerCAmelCase_):
    """Return True if the single character `lowerCAmelCase_` is punctuation.

    ASCII characters in the ranges ``!-/``, ``:-@``, ``[-`` ` `` and ``{-~``
    are treated as punctuation even though not all of them fall in the
    Unicode "P" categories; any character whose Unicode category starts
    with "P" is punctuation as well.
    """
    # Fix: the mangled body referenced the undefined name `_SCREAMING_SNAKE_CASE`
    # instead of the actual parameter, raising NameError at call time.
    char = lowerCAmelCase_
    cp = ord(char)
    if (33 <= cp <= 47) or (58 <= cp <= 64) or (91 <= cp <= 96) or (123 <= cp <= 126):
        return True
    return unicodedata.category(char).startswith("P")
@dataclass
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Data collator that pads LUKE token-classification features — including
    entity-level labels, `ner_tags` and `original_entity_spans` — into PyTorch
    tensors.

    NOTE(review): the field identifiers were machine-mangled so every field
    below shares the name `__UpperCAmelCase` (only the last annotation
    survives); they were presumably tokenizer / padding / max_length /
    pad_to_multiple_of / label_pad_token_id / return_tensors — confirm against
    the original (DataCollatorForLukeTokenClassification).
    """

    __UpperCAmelCase : PreTrainedTokenizerBase
    __UpperCAmelCase : Union[bool, str, PaddingStrategy] = True
    __UpperCAmelCase : Optional[int] = None
    __UpperCAmelCase : Optional[int] = None
    # -100 is the index ignored by PyTorch cross-entropy loss.
    __UpperCAmelCase : int = -100
    __UpperCAmelCase : str = "pt"

    def _UpperCamelCase ( self , a_ ):
        # Collate a list of feature dicts into one padded batch.
        # NOTE(review): the mangled assignments bind `lowerCamelCase_`, yet the
        # code reads `features`, `label_name`, `labels`, `batch`,
        # `sequence_length`, `padding_side`; `torch.intaa` is a mangled
        # `torch.int64`. As written this raises NameError — restore the
        # original bindings when un-mangling.
        import torch

        lowerCamelCase_ : Optional[Any] = "label" if "label" in features[0].keys() else "labels"
        lowerCamelCase_ : Dict = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        # Tokenizer pads the token-level fields; tensor conversion is deferred
        # until labels have been padded too.
        lowerCamelCase_ : List[Any] = self.tokenizer.pad(
            a_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
        if labels is None:
            return batch
        lowerCamelCase_ : str = torch.tensor(batch["entity_ids"] ).shape[1]
        lowerCamelCase_ : Dict = self.tokenizer.padding_side
        if padding_side == "right":
            lowerCamelCase_ : Union[str, Any] = [
                list(a_ ) + [self.label_pad_token_id] * (sequence_length - len(a_ )) for label in labels
            ]
        else:
            lowerCamelCase_ : Optional[int] = [
                [self.label_pad_token_id] * (sequence_length - len(a_ )) + list(a_ ) for label in labels
            ]
        # ner_tags are padded with -1, entity spans with the (-1, -1) pair.
        lowerCamelCase_ : List[Any] = [feature["ner_tags"] for feature in features]
        lowerCamelCase_ : Optional[int] = padding_tensor(a_ , -1 , a_ , a_ )
        lowerCamelCase_ : Union[str, Any] = [feature["original_entity_spans"] for feature in features]
        lowerCamelCase_ : Any = padding_tensor(a_ , (-1, -1) , a_ , a_ )
        lowerCamelCase_ : List[Any] = {k: torch.tensor(a_ , dtype=torch.intaa ) for k, v in batch.items()}
        return batch
| 701 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCAmelCase__ ( unittest.TestCase ):
    """Unit tests for transformers.utils.backbone_utils helpers and BackboneMixin.

    NOTE(review): `a_` is read but never defined throughout this class — a
    mangling artifact (originally the None arguments, the unpacked return
    values, and the expected exception type); confirm against the original
    test module.
    """

    def _UpperCamelCase ( self ):
        # get_aligned_output_features_output_indices fills in whichever of
        # out_features / out_indices is missing from the other.
        lowerCamelCase_ : int = ["a", "b", "c"]
        # Defaults to last layer if both are None
        lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , a_ , a_ )
        self.assertEqual(a_ , ["c"] )
        self.assertEqual(a_ , [2] )
        # Out indices set to match out features
        lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
        self.assertEqual(a_ , ["a", "c"] )
        self.assertEqual(a_ , [0, 2] )
        # Out features set to match out indices
        lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
        self.assertEqual(a_ , ["a", "c"] )
        self.assertEqual(a_ , [0, 2] )
        # Out features selected from negative indices
        lowerCamelCase_ ,lowerCamelCase_ : Dict = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
        self.assertEqual(a_ , ["a", "c"] )
        self.assertEqual(a_ , [-3, -1] )

    def _UpperCamelCase ( self ):
        # verify_out_features_out_indices: every invalid combination must raise.
        # Stage names must be set
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
        # Out features must be a list
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
        # Out features must be a subset of stage names
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
        # Out indices must be a list or tuple
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
        # Out indices must be a subset of stage names
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
        # Out features and out indices must be the same length
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
        # Out features should match out indices
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
        # Out features and out indices should be in order
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )

    def _UpperCamelCase ( self ):
        # BackboneMixin keeps out_features and out_indices in sync when either
        # is reassigned.
        lowerCamelCase_ : List[Any] = BackboneMixin()
        lowerCamelCase_ : List[Any] = ["a", "b", "c"]
        lowerCamelCase_ : Optional[int] = ["a", "c"]
        lowerCamelCase_ : Dict = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ["a", "c"] )
        self.assertEqual(backbone.out_indices , [0, 2] )
        # Check out features and indices are updated correctly
        lowerCamelCase_ : Union[str, Any] = ["a", "b"]
        self.assertEqual(backbone.out_features , ["a", "b"] )
        self.assertEqual(backbone.out_indices , [0, 1] )
        lowerCamelCase_ : str = [-3, -1]
        self.assertEqual(backbone.out_features , ["a", "c"] )
        self.assertEqual(backbone.out_indices , [-3, -1] )
| 73 | 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __magic_name__ ( dataset , expected_features ):
    """Assert that `dataset` is the canonical 4-row / 3-column test `Dataset`
    and that each column's dtype matches `expected_features`
    (mapping column name -> dtype string)."""
    # Fix: the mangled signature declared two parameters with the same name
    # (a SyntaxError) and the isinstance check referenced the undefined
    # `lowercase__`; names restored from the body's usage.
    assert isinstance(dataset , Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def __magic_name__ ( parquet_path , tmp_path , keep_in_memory ):
    """Read a parquet file into a Dataset, optionally keeping it in memory,
    and check rows/columns/dtypes.

    Fix: the mangled signature had three identical parameter names (a
    SyntaxError) and the body referenced the undefined `lowercase__`;
    names restored from the body's usage (pytest fixtures are keyword-matched).
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    # Arrow memory must grow only when keep_in_memory=True.
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory).read()
    # NOTE(review): `_check_parquet_dataset` is the helper defined above, whose
    # name was mangled to `__magic_name__` in this file — confirm when un-mangling.
    _check_parquet_dataset(dataset , expected_features)
@pytest.mark.parametrize(
    "features" , [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ] , )
def __magic_name__ ( parquet_path , features , tmp_path ):
    """Read a parquet file with an explicit `features` schema (or None for the
    inferred default) and check the decoded dtypes.

    Fix: duplicate mangled parameter names (SyntaxError) and undefined
    `lowercase__` references restored from the body's usage.
    """
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path , features=features , cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset , expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"])
def __magic_name__ ( split , parquet_path , tmp_path ):
    """Read with an explicit `split` and check the dataset's split name
    (defaults to "train" when no split is given).

    Fix: duplicate mangled parameter names (SyntaxError) and undefined
    `lowercase__` references restored; the final assert was also vacuous for
    falsy splits (`assert ... if split else "train"` always passed).
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , split=split).read()
    _check_parquet_dataset(dataset , expected_features)
    assert dataset.split == split if split else dataset.split == "train"
@pytest.mark.parametrize("path_type" , [str, list])
def __magic_name__ ( path_type , parquet_path , tmp_path ):
    """Read from a single path or from a list of paths.

    Fix: duplicate mangled parameter names (SyntaxError) and undefined
    `lowercase__` references restored from the body's usage.
    """
    if issubclass(path_type , str):
        path = parquet_path
    elif issubclass(path_type , list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset , expected_features)
def __magic_name__ ( dataset_dict , expected_features , splits=("train",) ):
    """Assert that `dataset_dict` is a `DatasetDict` holding, for each split in
    `splits`, the canonical 4-row / 3-column dataset with the expected dtypes.

    Fix: duplicate mangled parameter names (SyntaxError) and undefined
    `lowercase__` references restored from the body's usage. The default is a
    tuple, so the mutable-default pitfall does not apply.
    """
    assert isinstance(dataset_dict , DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def __magic_name__ ( parquet_path , tmp_path , keep_in_memory ):
    """Read a parquet file as a DatasetDict (single "train" split), optionally
    in memory, and validate it.

    Fix: duplicate mangled parameter names (SyntaxError) and undefined
    `lowercase__` references restored from the body's usage.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory).read()
    _check_parquet_datasetdict(dataset , expected_features)
@pytest.mark.parametrize(
    "features" , [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ] , )
def __magic_name__ ( parquet_path , features , tmp_path ):
    """Read a parquet file as a DatasetDict with an explicit `features` schema
    (or None for the inferred default) and check the decoded dtypes.

    Fix: duplicate mangled parameter names (SyntaxError) and undefined
    `lowercase__` references restored from the body's usage.
    """
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path} , features=features , cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset , expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"])
def __magic_name__ ( split , parquet_path , tmp_path ):
    """Read a DatasetDict from a {split: path} mapping (or a default
    train/test pair when no split is given) and check each split's name.

    Fix: duplicate mangled parameter names (SyntaxError) and undefined
    `lowercase__` references restored from the body's usage.
    """
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset , expected_features , splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def __magic_name__ ( dataset , tmp_path ):
    """Write `dataset` to a parquet file and check that reading the file back
    with pyarrow yields the identical Arrow table.

    Fix: duplicate mangled parameter names (SyntaxError) and undefined
    `lowercase__` references restored from the body's usage.
    """
    writer = ParquetDatasetWriter(dataset , tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def __magic_name__ ( shared_datadir , tmp_path ):
    """Round-trip a Dataset with an Image feature through parquet and check the
    features survive, both for eager and streaming reads.

    Fix: duplicate mangled parameter names (SyntaxError); names restored from
    the body's usage, and the mangled `streaming=lowercase__` restored to
    `streaming=True` (the streaming branch is what the final assert exercises).
    """
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data , features=features)
    writer = ParquetDatasetWriter(dataset , tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet") , streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected" , [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ] , )
def __magic_name__ ( feature , expected ):
    """get_writer_batch_size picks a parquet row-group size based on the
    feature types (None means the library default).

    Fix: duplicate mangled parameter names (SyntaxError) and the undefined
    `lowercase__` reference restored to match the parametrize ids.
    """
    assert get_writer_batch_size(feature) == expected
| 702 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowerCAmelCase__ ( unittest.TestCase ):
    """Tests for the `accelerate launch` / `accelerate test` CLI entry points,
    driven through subprocesses.

    NOTE(review): the class attributes were all mangled to one shared name
    (`__UpperCAmelCase`), while the code reads `mod_file`, `config_folder`,
    `config_file`, `cls.config_path`, `cls.changed_path`, `self.base_cmd`,
    `self.test_file_path`, `self.test_config_path` — confirm against the
    original accelerate test module.
    """

    __UpperCAmelCase : Any = inspect.getfile(accelerate.test_utils )
    __UpperCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
    __UpperCAmelCase : Tuple = ['''accelerate''', '''launch''']
    __UpperCAmelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
    __UpperCAmelCase : int = '''default_config.yaml'''
    __UpperCAmelCase : Tuple = config_folder / config_file
    __UpperCAmelCase : int = config_folder / '''_default_config.yaml'''
    __UpperCAmelCase : int = Path('''tests/test_configs''' )

    @classmethod
    def _UpperCamelCase ( cls ):
        # setUpClass: stash the user's real default config out of the way.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path )

    @classmethod
    def _UpperCamelCase ( cls ):
        # tearDownClass: restore the stashed default config.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path )

    def _UpperCamelCase ( self ):
        # Launch the CLI test script, adding --multi_gpu when >1 GPU is present.
        lowerCamelCase_ : List[Any] = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )

    def _UpperCamelCase ( self ):
        # Run the launcher against every YAML config under tests/test_configs.
        for config in sorted(self.test_config_path.glob("**/*.yaml" ) ):
            with self.subTest(config_file=a_ ):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(a_ ), self.test_file_path] , env=os.environ.copy() )

    def _UpperCamelCase ( self ):
        # Smoke-test the `accelerate test` subcommand.
        execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy() )
class lowerCAmelCase__ ( unittest.TestCase ):
    """Tests for `accelerate tpu-config`: each case runs the CLI with --debug
    and asserts on the gcloud command line it would execute.

    NOTE(review): the attributes were mangled to one shared name
    (`__UpperCAmelCase`) while the methods read `self.cmd`, `self.command`,
    `self.tpu_zone`, `self.tpu_name`, `self.gcloud`, `self.base_output`,
    `self.command_file`; `a_` (presumably return_stdout=True and the captured
    output) is also undefined — confirm against the original test module.
    """

    __UpperCAmelCase : List[Any] = '''test-tpu'''
    __UpperCAmelCase : Tuple = '''us-central1-a'''
    __UpperCAmelCase : Tuple = '''ls'''
    __UpperCAmelCase : str = ['''accelerate''', '''tpu-config''']
    __UpperCAmelCase : Dict = '''cd /usr/share'''
    __UpperCAmelCase : Any = '''tests/test_samples/test_command_file.sh'''
    __UpperCAmelCase : Dict = '''Running gcloud compute tpus tpu-vm ssh'''

    def _UpperCamelCase ( self ):
        # Single --command passed directly on the CLI.
        lowerCamelCase_ : Any = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )

    def _UpperCamelCase ( self ):
        # --command combined with an explicit legacy (0.12.0) config file.
        lowerCamelCase_ : Tuple = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )

    def _UpperCamelCase ( self ):
        # Commands taken entirely from the latest.yaml config file.
        lowerCamelCase_ : Union[str, Any] = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=a_ )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )

    def _UpperCamelCase ( self ):
        # A CLI --command overrides the commands from latest.yaml.
        lowerCamelCase_ : Any = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )

    def _UpperCamelCase ( self ):
        # Multiple --command flags are run in order.
        lowerCamelCase_ : List[Any] = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                "echo \"Hello World\"",
                "--debug",
            ] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , a_ , )

    def _UpperCamelCase ( self ):
        # Commands read from a --command_file script.
        lowerCamelCase_ : List[str] = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )

    def _UpperCamelCase ( self ):
        # --command_file combined with a legacy (0.12.0) config file.
        lowerCamelCase_ : Dict = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )

    def _UpperCamelCase ( self ):
        # --install_accelerate prepends a pip install of the latest accelerate.
        lowerCamelCase_ : str = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )

    def _UpperCamelCase ( self ):
        # --accelerate_version pins the pip install to a specific release.
        lowerCamelCase_ : Any = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ] , return_stdout=a_ , )
        self.assertIn(
            F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
| 73 | 0 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
# Protobuf-compiler generated module body for sentencepiece_model.proto.
# BUG FIX: the descriptor, symbol database and globals dict were all assigned
# to the placeholder name `__magic_name__`, leaving `DESCRIPTOR` and
# `_globals` (used by the builder calls below) undefined. The canonical
# generated names are restored; the serialized byte blob is kept verbatim.
_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
    B'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ 
\x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18  \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 
\x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)

_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b'H\003'
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    # Byte offsets of each message/enum inside the serialized file above;
    # the target names match protoc's Python output for this proto.
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 703 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase__ ( DiffusionPipeline ):
    """Unconditional latent-diffusion image-generation pipeline (VQ-VAE + UNet + scheduler).

    BUG FIX: the original subclassed the undefined name ``__lowerCamelCase``
    (the ``DiffusionPipeline`` imported above), repeated the placeholder
    parameter ``a_`` (a SyntaxError), and read locals (``latents``,
    ``accepts_eta``, ...) that were never assigned. Real names are restored
    from the surviving references.
    """

    def __init__( self , vqvae , unet , scheduler ):
        super().__init__()
        # Register sub-models so save_pretrained / from_pretrained round-trip.
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size = 1 , generator = None , eta = 0.0 , num_inference_steps = 50 , output_type = "pil" , return_dict = True , **kwargs , ):
        """Sample ``batch_size`` images; returns ``ImagePipelineOutput`` (or a tuple)."""
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        latents = latents.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps )

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps ):
            latent_model_input = self.scheduler.scale_model_input(latents , t )
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction , t , latents , **extra_kwargs ).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents ).sample

        # map from [-1, 1] to [0, 1] and move channels last for numpy/PIL
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
| 73 | 0 |
import argparse
import copy
def generate_neighbours(path):
    """Parse an edge-list file into an adjacency mapping.

    Each line of the file is ``node_a node_b distance``. Returns a dict mapping
    every node to a list of ``[neighbour, distance]`` pairs (distances are kept
    as strings, as in the file).

    BUG FIX: the original body assigned lists to the placeholder local
    ``lowerCamelCase_`` while appending to the undefined name ``_list``
    (NameError), and the function name was mangled — the ``main`` entry point
    below calls ``generate_neighbours``.
    """
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            fields = line.split()
            node_a, node_b, distance = fields[0], fields[1], fields[2]
            # Record the edge in both directions.
            dict_of_neighbours.setdefault(node_a, []).append([node_b, distance])
            dict_of_neighbours.setdefault(node_b, []).append([node_a, distance])

    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour starting at the file's first character.

    Returns ``(first_solution, distance_of_first_solution)`` where the tour is a
    list of nodes beginning and ending at the start node.

    BUG FIX: the original signature repeated the placeholder parameter name
    (a SyntaxError) and the body read locals that were never assigned
    (``start_node``, ``visiting``, ``minim``, ...); real names are restored.
    """
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        # Pick the closest not-yet-visited neighbour; 10000 is a sentinel
        # "infinite" distance.
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    # The final loop iteration added the 10000 sentinel; replace it with the
    # real distance of the closing edge back to the start node.
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Return all distinct tours obtained by swapping two interior nodes of ``solution``.

    Each neighbour tour has its total distance appended as its last element,
    and the returned list is sorted by that distance (ascending).

    BUG FIX: the original signature repeated the placeholder parameter name
    (a SyntaxError), the swapped-tour locals were clobbered, and the sort
    lambda referenced ``x`` while declaring a different parameter name.
    """
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            # Swap the two interior nodes on a copy of the tour.
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for ``iters`` iterations with a tabu list of length ``size``.

    Starting from ``first_solution`` (with cost ``distance_of_first_solution``),
    repeatedly move to the best non-tabu neighbour tour, recording each
    exchanged node pair in the tabu list. Returns ``(best_solution_ever,
    best_cost)``.

    BUG FIX: the original signature repeated the placeholder parameter name
    (a SyntaxError) and every local was clobbered to ``lowerCamelCase_``
    while being read under its real name; the real locals are restored.
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            # Find the first position where the candidate differs from the
            # current tour; that pair is the attempted exchange.
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                # Drop the appended distance before adopting the tour.
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # Exchange is tabu: fall back to the next-best neighbour.
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    """CLI entry point: build the adjacency map, a greedy first tour, then run tabu search.

    ``args`` is the parsed-argument namespace with ``File``, ``Iterations``
    and ``Size`` attributes (see the argument parser below).

    BUG FIX: the function and parameter names were mangled while the
    ``__main__`` guard calls ``main(parser.parse_args())`` and the body reads
    ``args.*``; real names are restored.
    """
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours)

    best_sol, best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )

    print(F"""Best solution: {best_sol}, with total distance: {best_cost}.""")
if __name__ == "__main__":
    # BUG FIX: the parser was bound to the placeholder `__magic_name__` while
    # every call below references `parser`; restore the real name.
    parser = argparse.ArgumentParser(description='''Tabu Search''')
    parser.add_argument(
        '''-f''',
        '''--File''',
        type=str,
        help='''Path to the file containing the data''',
        required=True,
    )
    parser.add_argument(
        '''-i''',
        '''--Iterations''',
        type=int,
        help='''How many iterations the algorithm should perform''',
        required=True,
    )
    parser.add_argument(
        '''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
| 704 |
import re
def __magic_name__ ( lowerCAmelCase_):
    """Return the complementary DNA strand (A<->T, C<->G).

    Raises:
        ValueError: if the input contains any character other than A, T, C, G.

    BUG FIX: the original body referenced the undefined name ``dna`` instead
    of the function's parameter, raising NameError on every call.
    """
    # Count only valid bases; any other character makes the lengths differ.
    if len(re.findall("[ATCG]" , lowerCAmelCase_)) != len(lowerCAmelCase_):
        raise ValueError("Invalid Strand")

    return lowerCAmelCase_.translate(str.maketrans("ATCG" , "TAGC"))
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 73 | 0 |
import operator as op
# NOTE(review): every constant below was renamed to the same placeholder
# `__magic_name__` by an automated rewrite, so each assignment clobbers the
# previous one and nothing here is importable under a meaningful name. The
# values match accelerate's `utils/constants.py` (SCALER_NAME, MODEL_NAME,
# RNG_STATE_NAME, ..., TORCH_LAUNCH_PARAMS) — restore the original names
# before relying on this module. TODO confirm against upstream.
__magic_name__ = '''scaler.pt'''
__magic_name__ = '''pytorch_model'''
__magic_name__ = '''random_states'''
__magic_name__ = '''optimizer'''
__magic_name__ = '''scheduler'''
__magic_name__ = '''pytorch_model.bin'''
__magic_name__ = '''pytorch_model.bin.index.json'''
__magic_name__ = '''model.safetensors'''
__magic_name__ = '''model.safetensors.index.json'''
__magic_name__ = '''1.10.2'''
__magic_name__ = '''py38'''
__magic_name__ = '''4.17.0'''
__magic_name__ = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
__magic_name__ = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
__magic_name__ = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
__magic_name__ = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
__magic_name__ = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
__magic_name__ = '''2.0.1'''
__magic_name__ = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
__magic_name__ = ['''default''', '''reduce-overhead''', '''max-autotune''']
# Maps comparison-operator strings to their `operator` module functions.
__magic_name__ = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
__magic_name__ = [
    '''nnodes''',
    '''nproc_per_node''',
    '''rdzv_backend''',
    '''rdzv_endpoint''',
    '''rdzv_id''',
    '''rdzv_conf''',
    '''standalone''',
    '''max_restarts''',
    '''monitor_interval''',
    '''start_method''',
    '''role''',
    '''module''',
    '''m''',
    '''no_python''',
    '''run_path''',
    '''log_dir''',
    '''r''',
    '''redirects''',
    '''t''',
    '''tee''',
    '''node_rank''',
    '''master_addr''',
    '''master_port''',
]
__magic_name__ = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
__magic_name__ = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
| 705 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude, angle, radian_mode=False):
    """Resolve a force given in polar form into rectangular ``[x, y]`` components.

    ``angle`` is interpreted in degrees unless ``radian_mode`` is True.

    BUG FIX: the original signature repeated the placeholder parameter name
    three times (a SyntaxError); real names are restored from the call sites
    in the ``__main__`` guard below (``polar_force(magnitude, angle)``).
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def in_static_equilibrium(forces, location, eps=10**-1):
    """Check whether a planar system of forces is in rotational equilibrium.

    ``forces`` is an ``(n, 2)`` array of force components and ``location`` an
    ``(n, 2)`` array of their application points. Returns True when the net
    moment about the origin is smaller than ``eps`` in absolute value.

    BUG FIX: the original signature repeated the placeholder parameter name
    (a SyntaxError); real names are restored from the call sites below.
    """
    # z-components of r x F for every force (scalar per 2D pair).
    moments = cross(location, forces)
    sum_moments = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
    # Test to check if it works
    # BUG FIX: the arrays were bound to the placeholder `__magic_name__`
    # while the assertions reference `forces` / `location`; restore the
    # names the assertions actually read.
    forces = array(
        [
            polar_force(7_18.4, 1_8_0 - 3_0),
            polar_force(8_79.54, 4_5),
            polar_force(1_0_0, -9_0),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(3_0 * 9.81, 1_5),
            polar_force(2_1_5, 1_8_0 - 4_5),
            polar_force(2_6_4, 9_0 - 3_0),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
    location = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
| 73 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Scheduler tests for ``DEISMultistepScheduler``.

    Covers config round-trips through ``save_config``/``from_pretrained``,
    step-output equivalence after reloading, sweeps over configuration
    options, and full-denoising-loop regression checks against known result
    means.

    NOTE(review): the base class name ``__lowerCamelCase`` is undefined in
    this module — presumably it should be ``SchedulerCommonTest`` (imported
    above). Many locals below read names (``config``, ``scheduler``,
    ``sample``, ...) that an automated rename clobbered to ``lowerCamelCase_``
    — confirm against the upstream diffusers test before executing.
    """

    __UpperCAmelCase : Tuple = (DEISMultistepScheduler,)
    __UpperCAmelCase : List[str] = (("num_inference_steps", 25),)

    def _UpperCamelCase ( self , **a_ ):
        # Base scheduler config; keyword arguments override the defaults.
        lowerCamelCase_ : Union[str, Any] = {
            "num_train_timesteps": 1000,
            "beta_start": 0.00_01,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**a_ )
        return config

    def _UpperCamelCase ( self , a_=0 , **a_ ):
        # Save a configured scheduler, reload it, and check both produce
        # identical step() outputs from the same state.
        lowerCamelCase_ : Any = dict(self.forward_default_kwargs )
        lowerCamelCase_ : Any = kwargs.pop("num_inference_steps" , a_ )
        lowerCamelCase_ : Dict = self.dummy_sample
        lowerCamelCase_ : str = 0.1 * sample
        lowerCamelCase_ : int = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            lowerCamelCase_ : List[Any] = self.get_scheduler_config(**a_ )
            lowerCamelCase_ : List[Any] = scheduler_class(**a_ )
            scheduler.set_timesteps(a_ )
            # copy over dummy past residuals
            lowerCamelCase_ : List[str] = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(a_ )
                lowerCamelCase_ : Tuple = scheduler_class.from_pretrained(a_ )
                new_scheduler.set_timesteps(a_ )
                # copy over dummy past residuals
                lowerCamelCase_ : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]

            lowerCamelCase_ ,lowerCamelCase_ : Optional[Any] = sample, sample
            for t in range(a_ , time_step + scheduler.config.solver_order + 1 ):
                lowerCamelCase_ : List[str] = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
                lowerCamelCase_ : Any = new_scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample

                assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def _UpperCamelCase ( self ):
        pass

    def _UpperCamelCase ( self , a_=0 , **a_ ):
        # Same save/reload equivalence check, but timesteps are (re)set after
        # construction and reloading.
        lowerCamelCase_ : int = dict(self.forward_default_kwargs )
        lowerCamelCase_ : Tuple = kwargs.pop("num_inference_steps" , a_ )
        lowerCamelCase_ : Union[str, Any] = self.dummy_sample
        lowerCamelCase_ : Tuple = 0.1 * sample
        lowerCamelCase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            lowerCamelCase_ : Tuple = self.get_scheduler_config()
            lowerCamelCase_ : List[Any] = scheduler_class(**a_ )
            scheduler.set_timesteps(a_ )

            # copy over dummy past residuals (must be after setting timesteps)
            lowerCamelCase_ : Tuple = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(a_ )
                lowerCamelCase_ : Optional[Any] = scheduler_class.from_pretrained(a_ )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(a_ )

                # copy over dummy past residual (must be after setting timesteps)
                lowerCamelCase_ : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]

            lowerCamelCase_ : Optional[Any] = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
            lowerCamelCase_ : List[Any] = new_scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def _UpperCamelCase ( self , a_=None , **a_ ):
        # Run a full 10-step denoising loop with the dummy model and return
        # the final sample tensor.
        if scheduler is None:
            lowerCamelCase_ : Dict = self.scheduler_classes[0]
            lowerCamelCase_ : str = self.get_scheduler_config(**a_ )
            lowerCamelCase_ : Union[str, Any] = scheduler_class(**a_ )

        lowerCamelCase_ : List[str] = self.scheduler_classes[0]
        lowerCamelCase_ : List[Any] = self.get_scheduler_config(**a_ )
        lowerCamelCase_ : List[Any] = scheduler_class(**a_ )

        lowerCamelCase_ : Optional[int] = 10
        lowerCamelCase_ : Union[str, Any] = self.dummy_model()
        lowerCamelCase_ : Any = self.dummy_sample_deter
        scheduler.set_timesteps(a_ )

        for i, t in enumerate(scheduler.timesteps ):
            lowerCamelCase_ : Optional[Any] = model(a_ , a_ )
            lowerCamelCase_ : Optional[int] = scheduler.step(a_ , a_ , a_ ).prev_sample

        return sample

    def _UpperCamelCase ( self ):
        # Step through two consecutive timesteps and compare output shapes.
        lowerCamelCase_ : List[str] = dict(self.forward_default_kwargs )
        lowerCamelCase_ : Optional[Any] = kwargs.pop("num_inference_steps" , a_ )

        for scheduler_class in self.scheduler_classes:
            lowerCamelCase_ : Any = self.get_scheduler_config()
            lowerCamelCase_ : Any = scheduler_class(**a_ )

            lowerCamelCase_ : str = self.dummy_sample
            lowerCamelCase_ : int = 0.1 * sample

            if num_inference_steps is not None and hasattr(a_ , "set_timesteps" ):
                scheduler.set_timesteps(a_ )
            elif num_inference_steps is not None and not hasattr(a_ , "set_timesteps" ):
                lowerCamelCase_ : Optional[Any] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            lowerCamelCase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
            lowerCamelCase_ : Dict = dummy_past_residuals[: scheduler.config.solver_order]

            lowerCamelCase_ : Optional[Any] = scheduler.timesteps[5]
            lowerCamelCase_ : str = scheduler.timesteps[6]

            lowerCamelCase_ : List[Any] = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
            lowerCamelCase_ : Optional[int] = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample

            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def _UpperCamelCase ( self ):
        # Full loop should be invariant to converting the config through the
        # other multistep scheduler classes and back to DEIS.
        lowerCamelCase_ : List[str] = DEISMultistepScheduler(**self.get_scheduler_config() )
        lowerCamelCase_ : int = self.full_loop(scheduler=a_ )
        lowerCamelCase_ : Union[str, Any] = torch.mean(torch.abs(a_ ) )

        assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3

        lowerCamelCase_ : List[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        lowerCamelCase_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
        lowerCamelCase_ : Optional[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
        lowerCamelCase_ : List[str] = DEISMultistepScheduler.from_config(scheduler.config )

        lowerCamelCase_ : Tuple = self.full_loop(scheduler=a_ )
        lowerCamelCase_ : Union[str, Any] = torch.mean(torch.abs(a_ ) )

        assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3

    def _UpperCamelCase ( self ):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=a_ )

    def _UpperCamelCase ( self ):
        # Sweep thresholding-related options across solver orders and
        # prediction types.
        self.check_over_configs(thresholding=a_ )
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=a_ , prediction_type=a_ , sample_max_value=a_ , algorithm_type="deis" , solver_order=a_ , solver_type=a_ , )

    def _UpperCamelCase ( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=a_ )

    def _UpperCamelCase ( self ):
        # Every algorithm/solver/order/prediction combination must run the
        # full loop without producing NaNs.
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=a_ , solver_type=a_ , prediction_type=a_ , algorithm_type=a_ , )
                        lowerCamelCase_ : Tuple = self.full_loop(
                            solver_order=a_ , solver_type=a_ , prediction_type=a_ , algorithm_type=a_ , )
                        assert not torch.isnan(a_ ).any(), "Samples have nan numbers"

    def _UpperCamelCase ( self ):
        self.check_over_configs(lower_order_final=a_ )
        self.check_over_configs(lower_order_final=a_ )

    def _UpperCamelCase ( self ):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=a_ , time_step=0 )

    def _UpperCamelCase ( self ):
        # Numerical regression check on the default full loop.
        lowerCamelCase_ : Tuple = self.full_loop()
        lowerCamelCase_ : str = torch.mean(torch.abs(a_ ) )

        assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3

    def _UpperCamelCase ( self ):
        # Numerical regression check for v-prediction.
        lowerCamelCase_ : str = self.full_loop(prediction_type="v_prediction" )
        lowerCamelCase_ : str = torch.mean(torch.abs(a_ ) )

        assert abs(result_mean.item() - 0.0_91 ) < 1E-3

    def _UpperCamelCase ( self ):
        # fp16 full loop: the final sample must stay in half precision.
        lowerCamelCase_ : Optional[Any] = self.scheduler_classes[0]
        lowerCamelCase_ : Optional[int] = self.get_scheduler_config(thresholding=a_ , dynamic_thresholding_ratio=0 )
        lowerCamelCase_ : Dict = scheduler_class(**a_ )

        lowerCamelCase_ : List[str] = 10
        lowerCamelCase_ : int = self.dummy_model()
        lowerCamelCase_ : Optional[Any] = self.dummy_sample_deter.half()
        scheduler.set_timesteps(a_ )

        for i, t in enumerate(scheduler.timesteps ):
            lowerCamelCase_ : int = model(a_ , a_ )
            lowerCamelCase_ : Union[str, Any] = scheduler.step(a_ , a_ , a_ ).prev_sample

        assert sample.dtype == torch.floataa
| 706 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( ProcessorMixin ):
    """Combined CLAP processor wrapping a ClapFeatureExtractor and a Roberta tokenizer.

    BUG FIX: the original subclassed the undefined name ``__lowerCamelCase``
    (the ``ProcessorMixin`` imported above), repeated the placeholder
    parameter ``a_`` (a SyntaxError), clobbered the ``feature_extractor_class``
    / ``tokenizer_class`` attributes ProcessorMixin requires, and dropped the
    ``encoding["input_features"]`` assignment. Real names are restored.
    """

    feature_extractor_class = '''ClapFeatureExtractor'''
    tokenizer_class = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')

    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )

    def __call__( self , text=None , audios=None , return_tensors=None , **kwargs ):
        """Tokenize ``text`` and/or extract features from ``audios``.

        At least one of ``text`` and ``audios`` must be given. When both are
        given, the audio features are merged into the text encoding under the
        ``input_features`` key.
        """
        sampling_rate = kwargs.pop("sampling_rate" , None )

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none." )

        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )

        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding

        elif text is not None:
            return encoding

        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        # Forwarded verbatim to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        # Forwarded verbatim to the tokenizer's decode.
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        # Union of tokenizer and feature-extractor input names, order kept,
        # duplicates removed.
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 73 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both assignments below were mangled to `__magic_name__`
# (presumably `logger` and `ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP`), so the
# second clobbers the first — restore the original names before use.
__magic_name__ = logging.get_logger(__name__)

__magic_name__ = {
    '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
    '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
    '''junnyu/roformer_chinese_char_small''': (
        '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
    ),
    '''junnyu/roformer_chinese_char_base''': (
        '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
    ),
    '''junnyu/roformer_small_discriminator''': (
        '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
    ),
    '''junnyu/roformer_small_generator''': (
        '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowerCAmelCase__ ( PretrainedConfig ):
    """Configuration for RoFormer models (rotary position embedding transformer).

    BUG FIX: the original subclassed the undefined name ``_UpperCamelCase``
    (the ``PretrainedConfig`` imported above), repeated the placeholder
    parameter ``a_`` for every argument (a SyntaxError), and referenced the
    undefined ``__a``. Parameter names are restored from the attribute
    assignments in the body; defaults are unchanged.
    """

    model_type = "roformer"

    def __init__( self , vocab_size=5_0000 , embedding_size=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1536 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , rotary_value=False , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        # Falls back to hidden_size when no separate embedding size is given.
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class lowerCAmelCase__ ( OnnxConfig ):
    """ONNX export configuration for RoFormer.

    BUG FIX: the original subclassed the undefined name ``_UpperCamelCase``
    (the ``OnnxConfig`` imported above) and returned axes keyed on the
    undefined ``dynamic_axis`` while assigning to a placeholder local.

    NOTE(review): the property name was mangled to ``_UpperCamelCase`` —
    upstream the OnnxConfig API expects it to be called ``inputs``; confirm
    before wiring into an export pipeline.
    """

    @property
    def _UpperCamelCase ( self ):
        # Axis 1 additionally carries the choice dimension for
        # multiple-choice tasks.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        # NOTE(review): upstream roformer re-assigns the two-axis mapping
        # unconditionally here, overriding the multiple-choice branch; kept
        # to match the original's visible double assignment.
        dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
| 707 |
def is_pangram(input_str="The quick brown fox jumps over the lazy dog", ):
    """Return True if ``input_str`` contains every letter a-z at least once.

    BUG FIX: the original read the undefined names ``input_str`` and
    ``frequency`` while assigning to mangled placeholders; the real names
    (recovered from the ``timeit`` setup string in ``benchmark`` below) are
    restored.
    """
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" " , "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())

    return len(frequency) == 26
def is_pangram_faster(input_str="The quick brown fox jumps over the lazy dog", ):
    """Pangram check using a 26-slot boolean table (faster than the set version).

    BUG FIX: the original assigned ``True`` to a placeholder local instead of
    the per-letter table slot, so the table never changed; the indexed
    assignments (``ord(char) - 97`` / ``- 65``) are restored.
    """
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)
def is_pangram_fastest(input_str="The quick brown fox jumps over the lazy dog", ):
    """Pangram check via a set comprehension over the lowercased letters.

    BUG FIX: the body read the undefined name ``input_str`` while the
    parameter was a mangled placeholder; the parameter name is restored.
    """
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
def benchmark():
    """Time the three pangram implementations against the default sentence.

    BUG FIX: the setup string was bound to a placeholder local while the
    ``timeit`` calls referenced an undefined name, and the function name was
    mangled although the ``__main__`` guard below calls ``benchmark()``.
    """
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()" , setup=setup))
    print(timeit("is_pangram_faster()" , setup=setup))
    print(timeit("is_pangram_fastest()" , setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
    # Verify the doctest examples, then time the three implementations.
    import doctest
    doctest.testmod()
    benchmark()
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# NOTE(review): the four module constants below were all renamed to the
# placeholder `__magic_name__` by an automated rewrite, so each assignment
# clobbers the previous one. Upstream (datasets/commands/convert.py) names
# them HIGHLIGHT_MESSAGE_PRE, HIGHLIGHT_MESSAGE_POST, UTILS_TO_MODIFY-style
# keyword list, and TO_CONVERT — restore the real names before importing.
__magic_name__ = '''<<<<<<< This should probably be modified because it mentions: '''
__magic_name__ = '''=======
>>>>>>>
'''
# TFDS-specific identifiers whose presence flags a line for manual review.
__magic_name__ = [
    '''TextEncoderConfig''',
    '''ByteTextEncoder''',
    '''SubwordTextEncoder''',
    '''encoder_config''',
    '''maybe_build_from_corpus''',
    '''manual_dir''',
]
# Regex (pattern, replacement) pairs applied to rewrite TFDS dataset scripts
# into HuggingFace Datasets scripts.
__magic_name__ = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (R'''tfds\.core''', R'''datasets'''),
    (R'''tf\.io\.gfile\.GFile''', R'''open'''),
    (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
    (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
    (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
    (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
    (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
    (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
    (R'''tfds\.''', R'''datasets.'''),
    (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
    (R'''self\.builder_config''', R'''self.config'''),
]
def __magic_name__ ( lowerCAmelCase_):
    """Factory used by the datasets-cli to build a ConvertCommand from parsed args.

    BUG FIX: the body read the undefined name ``args`` instead of the
    function's parameter (the parsed-argument namespace).
    """
    return ConvertCommand(lowerCAmelCase_.tfds_path , lowerCAmelCase_.datasets_directory)
class lowerCAmelCase__ ( BaseDatasetsCLICommand ):
    """CLI command converting TensorFlow Datasets scripts into HuggingFace Datasets scripts.

    Rewritten to repair the obfuscation damage: the original had a SyntaxError
    (duplicate ``a_`` parameters in ``__init__``), an undefined base class, and
    every local referenced through the undefined placeholder ``_snake_case``.

    NOTE(review): the argument-registration helper and ``run`` were both renamed
    to ``_UpperCamelCase``; the second definition shadows the first in the class
    namespace. Kept as-is to preserve the visible interface — the upstream names
    are ``register_subcommand`` and ``run``.
    """

    @staticmethod
    def _UpperCamelCase ( parser ):
        # Register the ``convert`` sub-command and its two required path options.
        train_parser = parser.add_parser(
            "convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
        train_parser.add_argument(
            "--tfds_path" , type=str , required=True , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
        train_parser.add_argument(
            "--datasets_directory" , type=str , required=True , help="Path to the HuggingFace Datasets folder." )
        # ``__magic_name__`` is the factory defined just above this class.
        train_parser.set_defaults(func=__magic_name__ )

    def __init__( self , tfds_path , datasets_directory , *args ):
        """Store the source (tfds) path and the destination datasets directory."""
        self._logger = get_logger("datasets-cli/converting" )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def _UpperCamelCase ( self ):
        """Convert every eligible ``.py`` file under the tfds path into a datasets script.

        Builder scripts get their own output directory; utility modules are
        copied next to the builder that imports them at the end.
        """
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
        self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(F"""Looking at file {f_name}""" )
            input_file = os.path.join(abs_tfds_path , f_name )
            output_file = os.path.join(abs_datasets_path , f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file" )
                continue
            with open(input_file , encoding="utf-8" ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger" , "get_logger" )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    # Surround lines mentioning unconvertible features with markers.
                    needs_manual_update = True
                    to_remove = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove ) + "\n" )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern , replacement , out_line )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(R"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , out_line )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
                    out_line = "from . import " + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(F"""Error converting {out_line.strip()}""" )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dataset_name = f_name.replace(".py" , "" )
                output_dir = os.path.join(abs_datasets_path , dataset_name )
                output_file = os.path.join(output_dir , f_name )
                os.makedirs(output_dir , exist_ok=True )
                self._logger.info(F"""Adding directory {output_dir}""" )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file )
            if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file , "w" , encoding="utf-8" ) as f:
                f.writelines(out_lines )
            self._logger.info(F"""Converted in {output_file}""" )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace(".py" , "" )]
                self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
                shutil.copy(utils_file , dest_folder )
            except KeyError:
                self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    F"""You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.""" )
| 708 |
# Conversion factors to joules. The chained assignment keeps the original
# obfuscated binding (``__magic_name__``) while also exposing the name the
# converter below actually reads (it was unbound — a NameError at runtime).
ENERGY_CONVERSION = __magic_name__ = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602_176_634E-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}
def __magic_name__ ( from_type , to_type , value ):
    '''Convert *value* from energy unit *from_type* to *to_type*.

    Bug fix: the original declared three identically-named parameters (a
    SyntaxError) — names restored from the error message / return expression.

    Raises:
        ValueError: if either unit is not a key of ``ENERGY_CONVERSION``.
    '''
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            F"""Valid values are: {', '.join(ENERGY_CONVERSION)}"""
        )
        raise ValueError(msg)
    # Normalise to joules, then re-express in the target unit.
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


# Public alias restoring the pre-obfuscation function name.
energy_conversion = __magic_name__
if __name__ == "__main__":
    # Validate the docstring examples when executed directly.
    import doctest
    doctest.testmod()
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase__ ( PipelineTesterMixin, unittest.TestCase ):
    """Fast (tiny random-weights) tests for ``ShapEPipeline``.

    Rewritten to repair obfuscation damage: the base mixin and every argument
    placeholder were undefined (``_lowerCAmelCase``), ``get_dummy_inputs``
    declared duplicate parameters (a SyntaxError), and all attribute/method
    names collided. Names are restored to the ones ``PipelineTesterMixin``
    and the method bodies themselves reference.
    """

    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        # Tiny CLIP tokenizer checkpoint used across diffusers fast tests.
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0 )  # deterministic random init
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(config )

    @property
    def dummy_prior(self):
        torch.manual_seed(0 )
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs )
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0 )
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs )
        return model

    def get_dummy_components(self):
        """Assemble the full set of tiny components the pipeline needs."""
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic call kwargs for the pipeline on *device*."""
        if str(device ).startswith("mps" ):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )

    def test_inference_batch_single_identical(self):
        # Exact-output comparison is only feasible on CPU.
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
    """Integration test running the real ``openai/shap-e`` checkpoint on GPU.

    Obfuscation damage repaired: ``_lowerCAmelCase`` placeholders replaced with
    the values the surrounding calls require (``torch_device``, ``None``), and
    the cleanup method restored to ``tearDown`` (its ``super().tearDown()`` call
    identifies it) so unittest actually invokes it.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy" )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e" )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            "a shark" , generator=generator , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
| 709 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
# Chained assignments keep the original obfuscated ``__magic_name__`` bindings
# while also exposing the names the tokenizer class below actually references
# (``VOCAB_FILES_NAMES``, ``logger``, ... were unbound — NameErrors at runtime).
logger = __magic_name__ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = __magic_name__ = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = __magic_name__ = {
    '''vocab_file''': {
        '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
        '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
    }
}
# ``None`` means no fixed positional-embedding size limit for these checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = __magic_name__ = {
    '''xlnet-base-cased''': None,
    '''xlnet-large-cased''': None,
}
# Segments (not really needed)
SEG_ID_A = __magic_name__ = 0
SEG_ID_B = __magic_name__ = 1
SEG_ID_CLS = __magic_name__ = 2
SEG_ID_SEP = __magic_name__ = 3
SEG_ID_PAD = __magic_name__ = 4
class lowerCAmelCase__ ( __lowerCamelCase ):
    """SentencePiece-based XLNet tokenizer (left-padding).

    NOTE(review): obfuscation renamed every multi-argument signature's
    parameters to the same identifier ``a_`` (duplicate argument names are a
    SyntaxError) and collapsed most method names onto ``_UpperCamelCase`` (later
    defs shadow earlier ones). The upstream names must be restored before this
    class can run; it is documented here as-is.
    """
    # Obfuscated class attributes — upstream: vocab_files_names,
    # pretrained_vocab_files_map, max_model_input_sizes, padding_side.
    __UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
    __UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCAmelCase : Optional[int] = '''left'''
    # NOTE(review): duplicate ``a_`` parameters — SyntaxError as written.
    def __init__( self , a_ , a_=False , a_=True , a_=False , a_="<s>" , a_="</s>" , a_="<unk>" , a_="<sep>" , a_="<pad>" , a_="<cls>" , a_="<mask>" , a_=["<eop>", "<eod>"] , a_ = None , **a_ , ):
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCamelCase_ : str = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
        lowerCamelCase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
        # ``3`` is the pad token type id used by XLNet.
        lowerCamelCase_ : str = 3
        lowerCamelCase_ : Dict = do_lower_case
        lowerCamelCase_ : str = remove_space
        lowerCamelCase_ : Tuple = keep_accents
        lowerCamelCase_ : Dict = vocab_file
        # Load the SentencePiece model from the vocab file.
        lowerCamelCase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(a_ )
    # vocab_size: number of pieces in the SentencePiece model.
    @property
    def _UpperCamelCase ( self ):
        return len(self.sp_model )
    # get_vocab: piece -> id mapping, plus added tokens.
    def _UpperCamelCase ( self ):
        lowerCamelCase_ : List[str] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    # Pickling support: the SentencePiece processor itself is not picklable.
    def __getstate__( self ):
        lowerCamelCase_ : Any = self.__dict__.copy()
        lowerCamelCase_ : Optional[int] = None
        return state
    def __setstate__( self , a_ ):
        lowerCamelCase_ : Union[str, Any] = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            lowerCamelCase_ : int = {}
        lowerCamelCase_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    # preprocess_text: whitespace cleanup, quote normalisation, optional
    # accent stripping and lower-casing before SentencePiece encoding.
    def _UpperCamelCase ( self , a_ ):
        if self.remove_space:
            lowerCamelCase_ : Optional[int] = " ".join(inputs.strip().split() )
        else:
            lowerCamelCase_ : str = inputs
        lowerCamelCase_ : Any = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            lowerCamelCase_ : Dict = unicodedata.normalize("NFKD" , a_ )
            lowerCamelCase_ : int = "".join([c for c in outputs if not unicodedata.combining(a_ )] )
        if self.do_lower_case:
            lowerCamelCase_ : Any = outputs.lower()
        return outputs
    # _tokenize: SentencePiece encoding with special handling for pieces that
    # end in a digit followed by "," (split the trailing comma off).
    def _UpperCamelCase ( self , a_ ):
        lowerCamelCase_ : List[Any] = self.preprocess_text(a_ )
        lowerCamelCase_ : Optional[int] = self.sp_model.encode(a_ , out_type=a_ )
        lowerCamelCase_ : List[str] = []
        for piece in pieces:
            if len(a_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                lowerCamelCase_ : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_ , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        lowerCamelCase_ : int = cur_pieces[1:]
                    else:
                        lowerCamelCase_ : Union[str, Any] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(a_ )
            else:
                new_pieces.append(a_ )
        return new_pieces
    # _convert_token_to_id / _convert_id_to_token via the SentencePiece model.
    def _UpperCamelCase ( self , a_ ):
        return self.sp_model.PieceToId(a_ )
    def _UpperCamelCase ( self , a_ ):
        return self.sp_model.IdToPiece(a_ )
    # convert_tokens_to_string: undo the SentencePiece underline marker.
    def _UpperCamelCase ( self , a_ ):
        lowerCamelCase_ : Dict = "".join(a_ ).replace(a_ , " " ).strip()
        return out_string
    # _decode: ids -> text, keeping added tokens as separate sub-strings.
    def _UpperCamelCase ( self , a_ , a_ = False , a_ = None , a_ = True , **a_ , ):
        lowerCamelCase_ : int = kwargs.pop("use_source_tokenizer" , a_ )
        lowerCamelCase_ : List[str] = self.convert_ids_to_tokens(a_ , skip_special_tokens=a_ )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        lowerCamelCase_ : Optional[int] = []
        lowerCamelCase_ : List[str] = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(a_ ) )
                    lowerCamelCase_ : Union[str, Any] = []
                sub_texts.append(a_ )
            else:
                current_sub_text.append(a_ )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(a_ ) )
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        lowerCamelCase_ : Union[str, Any] = "".join(a_ )
        lowerCamelCase_ : Optional[Any] = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            lowerCamelCase_ : List[Any] = self.clean_up_tokenization(a_ )
            return clean_text
        else:
            return text
    # build_inputs_with_special_tokens — XLNet format: A [SEP] (B [SEP]) [CLS].
    def _UpperCamelCase ( self , a_ , a_ = None ):
        lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCamelCase_ : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls
    # get_special_tokens_mask: 1 marks special tokens, 0 marks sequence tokens.
    def _UpperCamelCase ( self , a_ , a_ = None , a_ = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
        if token_ids_a is not None:
            return ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1, 1]
        return ([0] * len(a_ )) + [1, 1]
    # create_token_type_ids_from_sequences: segment ids; the trailing CLS gets
    # segment id 2 (XLNet convention).
    def _UpperCamelCase ( self , a_ , a_ = None ):
        lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCamelCase_ : Union[str, Any] = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
    # save_vocabulary: copy (or serialise) the SentencePiece model file into
    # the target directory.
    def _UpperCamelCase ( self , a_ , a_ = None ):
        if not os.path.isdir(a_ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCamelCase_ : Any = os.path.join(
            a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , a_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(a_ , "wb" ) as fi:
                lowerCamelCase_ : Dict = self.sp_model.serialized_model_proto()
                fi.write(a_ )
        return (out_vocab_file,)
| 73 | 0 |
def __magic_name__ ( pence = 200):
    '''Count how many ways *pence* can be made from standard UK coin denominations.

    Classic bottom-up coin-change DP: for each coin, ``number_of_ways[i]``
    accumulates the number of combinations summing to ``i``.
    Bug fix: the obfuscated parameter name never matched the ``pence`` the body
    reads, and the inner range started at an undefined name instead of ``coin``.
    '''
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 , 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


# Public alias restoring the pre-obfuscation name used by the guard below.
solution = __magic_name__
if __name__ == "__main__":
    # Smoke test: 200p can be formed from UK coins in 73682 ways
    # (Project Euler problem 31).
    assert solution(2_0_0) == 7_3_6_8_2
def __magic_name__ ( min_val = 10 , max_val = 1000 , option = True):
    '''Return ``min_val`` when *option* is truthy, else ``max_val``.

    Bug fix: the original declared three identically-named parameters (a
    SyntaxError); names restored from the body and error messages.

    Raises:
        ValueError: if ``min_val > max_val``.
    '''
    assert (
        isinstance(min_val , int)
        and isinstance(max_val , int)
        and isinstance(option , bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


# Public alias restoring the pre-obfuscation helper name.
temp_input_value = __magic_name__
def __magic_name__ ( number_a , number_b):
    '''Return the integer midpoint of two numbers (float division, truncated toward zero).

    Bug fix: the original declared duplicate parameters (a SyntaxError) and
    summed one operand with itself; restored to averaging both arguments.
    '''
    return int((number_a + number_b) / 2)


# Public alias restoring the pre-obfuscation helper name.
get_avg = __magic_name__
def __magic_name__ ( lower , higher , to_guess):
    '''Play a binary-search guessing game: narrow ``[lower, higher]`` until *to_guess* is hit.

    Prints every guess and the final answer. Bug fixes: the original declared
    three identically-named parameters (a SyntaxError) and called an unresolvable
    ``get_avg`` helper — the midpoint computation is inlined here.

    Raises:
        ValueError: if ``lower > higher`` or *to_guess* is outside the open interval.
    '''
    assert (
        isinstance(lower , int) and isinstance(higher , int) and isinstance(to_guess , int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")

    def answer(number) -> str:
        # Compare a candidate guess against the hidden target.
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        # Midpoint of the current interval (inlined get_avg).
        number = int((last_lowest + last_highest) / 2)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(F"""guess the number : {last_numbers[-1]}""")
    print(F"""details : {last_numbers!s}""")


# Public alias restoring the pre-obfuscation name used by main() below.
guess_the_number = __magic_name__
def __magic_name__ ( ):
    '''Interactive driver: read bounds and the target from stdin, then play the game.

    Bug fix: the original assigned the three inputs to one set of obfuscated
    names but passed a different, undefined name (three times) to the game.
    '''
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower , higher , guess)


# Public alias restoring the pre-obfuscation entry-point name.
main = __magic_name__
if __name__ == "__main__":
    main()
| 73 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
    """Container for the hyper-parameters driving the PoolFormer image-processor tests.

    Bug fix: the obfuscated ``__init__`` declared a dozen identically-named
    ``a_`` parameters (a SyntaxError); names restored from the attributes the
    body assigns. ``prepare_image_processor_dict`` keeps its real name because
    the test class below calls it by that name.
    """

    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize_and_center_crop=True , size=None , crop_pct=0.9 , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        # Fall back to the processor's default sizes when not supplied.
        # (The list defaults are never mutated, so sharing them is harmless.)
        size = size if size is not None else {'''shortest_edge''': 30}
        crop_size = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate a PoolFormerImageProcessor."""
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class lowerCAmelCase__ ( ImageProcessingSavingTestMixin, unittest.TestCase ):
    """Behavioural tests for ``PoolFormerImageProcessor`` over PIL / numpy / torch inputs.

    Obfuscation damage repaired: the mixin base and every argument placeholder
    (``UpperCamelCase__``) were undefined; names/flags restored from the visible
    call shapes. NOTE(review): the tester class above is (obfuscated as)
    ``lowerCAmelCase__``; ``PoolFormerImageProcessingTester`` is the upstream
    name — confirm which symbol exists at runtime.
    """

    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self )

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize_and_center_crop" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "crop_pct" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 30} )
        self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30} )
        # Scalar overrides are expanded into the dict forms.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 711 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (name obfuscated to `__magic_name__`).
__magic_name__ = logging.get_logger(__name__)
# Map of canonical checkpoint id -> hosted config.json
# (upstream name: CVT_PRETRAINED_CONFIG_ARCHIVE_MAP).
__magic_name__ = {
    '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCAmelCase__ ( PretrainedConfig ):
    """Configuration for a CvT (Convolutional vision Transformer) model.

    Every list-valued argument carries one entry per stage (three stages).
    Bug fixes: the obfuscated ``__init__`` declared 21 identically-named ``a_``
    parameters (a SyntaxError) — names restored from the attributes the body
    assigns — and the base class placeholder was undefined (``PretrainedConfig``
    is the configuration base imported at the top of this file).
    The shared list defaults mirror the upstream signature and are never mutated.
    """

    model_type = '''cvt'''

    def __init__( self , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 192, 384] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.02 , layer_norm_eps=1E-12 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 73 | 0 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 712 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
# Bind the name the `_LazyModule` call at the bottom actually reads; the
# original only bound the obfuscated `__magic_name__`, leaving
# `_import_structure` undefined. Chained assignments below likewise keep the
# original `__magic_name__` rebindings while also registering each optional
# submodule for lazy import.
_import_structure = __magic_name__
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ = _import_structure["tokenization_layoutxlm"] = ['''LayoutXLMTokenizer''']
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ = _import_structure["tokenization_layoutxlm_fast"] = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
    import sys
    __magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Import structure consumed by ``_LazyModule`` below: maps submodule name to
# the list of public symbols it exports.  Backend-specific entries are only
# registered when the corresponding framework is installed.
# (Fix: the dict was bound to a throwaway name and the per-backend symbol
# lists never reached it, leaving ``_import_structure`` undefined at the
# ``_LazyModule`` call.)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the lazy module below
    # performs these imports on first attribute access.
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules are only
    # imported when actually accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 713 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Composite processor that pairs an audio feature extractor with a T5
    tokenizer behind one ``__call__``/decode interface (the MusicGen/EnCodec
    pattern).

    NOTE(review): identifier scrambling in this file has left four methods all
    named ``_UpperCamelCase`` (only the last definition survives on the class)
    and several assignments binding to throwaway locals instead of ``self``
    attributes — see the per-line notes below.  Verify against the original
    processor module before relying on this class.
    """

    # Names of the feature-extractor / tokenizer classes this processor wraps.
    __UpperCAmelCase : Dict = '''EncodecFeatureExtractor'''
    __UpperCAmelCase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')

    def __init__( self , a_ , a_ ):
        super().__init__(a_ , a_ )
        # NOTE(review): these look like they were meant to be
        # ``self.current_processor = self.feature_extractor`` and
        # ``self._in_target_context_manager = False`` — as written both
        # values are bound to locals and discarded.
        lowerCamelCase_ : Optional[Any] = self.feature_extractor
        lowerCamelCase_ : Optional[int] = False

    def _UpperCamelCase ( self , a_=None , a_=None , a_=True ):
        """Forward decoder-prompt-id lookup to the tokenizer
        (task / language / no_timestamps passthrough)."""
        return self.tokenizer.get_decoder_prompt_ids(task=a_ , language=a_ , no_timestamps=a_ )

    def __call__( self , *a_ , **a_ ):
        """Dispatch to the tokenizer (``text``), the feature extractor
        (``audio``), or both, merging audio values into the text encoding.

        NOTE(review): the body reads ``kwargs``/``args``/``audio``/``text``/
        ``inputs``/``audio_inputs`` — names the scrambling no longer binds;
        the assignments below were presumably to those names originally.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*a_ , **a_ )

        lowerCamelCase_ : str = kwargs.pop("audio" , a_ )
        lowerCamelCase_ : List[str] = kwargs.pop("sampling_rate" , a_ )
        lowerCamelCase_ : Optional[Any] = kwargs.pop("text" , a_ )
        if len(a_ ) > 0:
            lowerCamelCase_ : int = args[0]
            lowerCamelCase_ : str = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )

        if text is not None:
            lowerCamelCase_ : Dict = self.tokenizer(a_ , **a_ )
        if audio is not None:
            lowerCamelCase_ : Optional[Any] = self.feature_extractor(a_ , *a_ , sampling_rate=a_ , **a_ )

        if audio is None:
            return inputs

        elif text is None:
            return audio_inputs

        else:
            # NOTE(review): presumably ``inputs["input_values"] = ...`` and
            # ``inputs["padding_mask"] = ...`` — here the values are discarded.
            lowerCamelCase_ : Dict = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                lowerCamelCase_ : int = audio_inputs["padding_mask"]
            return inputs

    def _UpperCamelCase ( self , *a_ , **a_ ):
        """Batch-decode: route audio values to ``_decode_audio`` and anything
        else to the tokenizer.

        NOTE(review): ``self._decode_audio`` does not exist under that name —
        the decode-audio method below is also called ``_UpperCamelCase``.
        """
        lowerCamelCase_ : Dict = kwargs.pop("audio" , a_ )
        lowerCamelCase_ : Optional[Any] = kwargs.pop("padding_mask" , a_ )
        if len(a_ ) > 0:
            lowerCamelCase_ : Optional[int] = args[0]
            lowerCamelCase_ : Optional[Any] = args[1:]
        if audio_values is not None:
            return self._decode_audio(a_ , padding_mask=a_ )
        else:
            return self.tokenizer.batch_decode(*a_ , **a_ )

    def _UpperCamelCase ( self , *a_ , **a_ ):
        """Plain passthrough to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*a_ , **a_ )

    def _UpperCamelCase ( self , a_ , a_ = None ):
        """Strip padding from generated audio using ``padding_mask`` and return
        a list of per-example numpy arrays.

        NOTE(review): reads ``audio_values``/``padding_mask``/``seq_len``/
        ``difference``/``sliced_audio`` which the scrambled assignments no
        longer bind — verify against the original before use.
        """
        lowerCamelCase_ : Any = to_numpy(a_ )
        lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : List[str] = audio_values.shape

        if padding_mask is None:
            return list(a_ )

        lowerCamelCase_ : Tuple = to_numpy(a_ )

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        lowerCamelCase_ : List[str] = seq_len - padding_mask.shape[-1]
        lowerCamelCase_ : int = 1 - self.feature_extractor.padding_value
        lowerCamelCase_ : List[Any] = np.pad(a_ , ((0, 0), (0, difference)) , "constant" , constant_values=a_ )

        lowerCamelCase_ : str = audio_values.tolist()
        for i in range(a_ ):
            lowerCamelCase_ : Dict = np.asarray(audio_values[i] )[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            lowerCamelCase_ : Dict = sliced_audio.reshape(a_ , -1 )

        return audio_values
| 73 | 0 |
import datasets
from .evaluate import evaluate
_snake_case = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
_snake_case = '''
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
_snake_case = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
    """CUAD metric: wraps the official CUAD v1 scoring script.

    Fixes applied: the two overrides were both named ``_UpperCamelCase`` (the
    second shadowed the first, and ``datasets.Metric`` requires ``_info`` and
    ``_compute``); ``_compute`` declared two parameters with the same name
    (a SyntaxError), overwrote one local with the other, and called
    ``evaluate`` with an undefined name ``__A``.
    """

    def _info( self ):
        """Declare the metric's features schema, citation and references."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string" ),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
                    },
                    "references": {
                        "id": datasets.Value("string" ),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string" ),
                                "answer_start": datasets.Value("int32" ),
                            } ),
                    },
                } ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )

    def _compute( self , predictions , references ):
        """Score ``predictions`` against ``references`` with the official
        CUAD ``evaluate`` function and return its score dict."""
        # Map each question id to its list of predicted answer texts.
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        # Re-wrap the flat references into the nested CUAD dataset layout
        # expected by the official scoring script.
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 714 |
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
if digit_amount > 0:
return round(number - int(lowerCAmelCase_) , lowerCAmelCase_)
return number - int(lowerCAmelCase_)
if __name__ == "__main__":
    # Demo of decimal isolation on positives, negatives and zero.
    # Fix: the original called ``decimal_isolate``, which is not defined in
    # this file — the function above is named ``__magic_name__``.
    print(__magic_name__(1.53, 0))
    print(__magic_name__(35.3_45, 1))
    print(__magic_name__(35.3_45, 2))
    print(__magic_name__(35.3_45, 3))
    print(__magic_name__(-14.7_89, 3))
    print(__magic_name__(0, 2))
    print(__magic_name__(-14.1_23, 1))
    print(__magic_name__(-14.1_23, 2))
    print(__magic_name__(-14.1_23, 3))
| 73 | 0 |
'''simple docstring'''
def __magic_name__ ( lowerCAmelCase_ = 10):
'''simple docstring'''
if not isinstance(UpperCamelCase__ , UpperCamelCase__) or n < 0:
raise ValueError("Invalid input")
lowerCamelCase_ : List[str] = 10**n
lowerCamelCase_ : str = 2_8433 * (pow(2 , 783_0457 , UpperCamelCase__)) + 1
return str(number % modulus)
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Fix: the original printed ``solution(1_0)`` but no ``solution`` is
    # defined in this file — the solver above is named ``__magic_name__``.
    print(f'''{__magic_name__(1_0) = }''')
| 715 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
    """Holds the hyper-parameters used by the image-processing tests below and
    builds the image-processor constructor kwargs from them.

    Fixes applied: the original ``__init__`` declared every parameter as
    ``a_`` (duplicate argument names are a SyntaxError) and bound all values
    to throwaway locals instead of ``self`` attributes, even though
    ``prepare_image_processor_dict`` reads ``self.do_resize`` /
    ``self.size`` / ``self.apply_ocr`` and the test class calls
    ``prepare_image_processor_dict()`` by name.
    """

    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        # Default target size used when the caller does not supply one.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict( self ):
        """Return the kwargs dict used to instantiate the image processor."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
    """Test suite for ``LayoutLMvaImageProcessor``: construction, resizing of
    PIL / numpy / torch inputs, and the OCR integration path.

    NOTE(review): identifier scrambling has left every test method named
    ``_UpperCamelCase`` — only the last definition survives on the class, so
    the earlier ones would never run under unittest.  The first method also
    instantiates ``LayoutLMvaImageProcessingTester``, a name not defined in
    this file, and binds it to a throwaway local instead of
    ``self.image_processor_tester``.  Verify method names against the
    original test module before relying on this class.
    """

    # Class under test; None when pytesseract is unavailable.
    __UpperCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def _UpperCamelCase ( self ):
        """setUp-style hook: build the hyper-parameter tester.

        NOTE(review): the instance is discarded — presumably meant to be
        ``self.image_processor_tester = ...``.
        """
        lowerCamelCase_ : List[str] = LayoutLMvaImageProcessingTester(self )

    @property
    def _UpperCamelCase ( self ):
        """Constructor kwargs for the image processor, from the tester."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def _UpperCamelCase ( self ):
        """The processor exposes its configuration attributes.

        NOTE(review): the ``hasattr`` checks reference ``a_``, which is not
        bound in this method — presumably the processor built on the first
        line.
        """
        lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(a_ , "do_resize" ) )
        self.assertTrue(hasattr(a_ , "size" ) )
        self.assertTrue(hasattr(a_ , "apply_ocr" ) )

    def _UpperCamelCase ( self ):
        """``from_dict`` honours both the stored size and an override."""
        lowerCamelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )

        lowerCamelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )

    def _UpperCamelCase ( self ):
        # Intentionally empty placeholder (e.g. a skipped common-mixin test).
        pass

    def _UpperCamelCase ( self ):
        """Resizing of PIL images, single and batched, yields (N, C, H, W)."""
        # Initialize image_processing
        lowerCamelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
        for image in image_inputs:
            self.assertIsInstance(a_ , Image.Image )

        # Test not batched input
        lowerCamelCase_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

        self.assertIsInstance(encoding.words , a_ )
        self.assertIsInstance(encoding.boxes , a_ )

        # Test batched
        lowerCamelCase_ : int = image_processing(a_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def _UpperCamelCase ( self ):
        """Same as above for numpy array inputs."""
        # Initialize image_processing
        lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
        for image in image_inputs:
            self.assertIsInstance(a_ , np.ndarray )

        # Test not batched input
        lowerCamelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

        # Test batched
        lowerCamelCase_ : Any = image_processing(a_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def _UpperCamelCase ( self ):
        """Same as above for torch tensor inputs."""
        # Initialize image_processing
        lowerCamelCase_ : str = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
        for image in image_inputs:
            self.assertIsInstance(a_ , torch.Tensor )

        # Test not batched input
        lowerCamelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

        # Test batched
        lowerCamelCase_ : Union[str, Any] = image_processing(a_ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def _UpperCamelCase ( self ):
        """Integration test: OCR on a real document image must reproduce the
        Tesseract 4.1.1 reference words/boxes; with ``apply_ocr=False`` only
        pixel values are returned."""
        # with apply_OCR = True
        lowerCamelCase_ : Any = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        lowerCamelCase_ : Optional[Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )

        lowerCamelCase_ : Optional[Any] = Image.open(ds[0]["file"] ).convert("RGB" )

        lowerCamelCase_ : List[Any] = image_processing(a_ , return_tensors="pt" )

        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        lowerCamelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
        lowerCamelCase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], 
[611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words , a_ )
        self.assertListEqual(encoding.boxes , a_ )

        # with apply_OCR = False
        lowerCamelCase_ : List[str] = LayoutLMvaImageProcessor(apply_ocr=a_ )

        lowerCamelCase_ : List[str] = image_processing(a_ , return_tensors="pt" )

        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 73 | 0 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__magic_name__ = '''src/diffusers'''
# Matches is_xxx_available()
__magic_name__ = re.compile(R'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
__magic_name__ = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
__magic_name__ = '''
{0} = None
'''
__magic_name__ = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
'''
__magic_name__ = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Any = _re_backend.findall(_UpperCAmelCase)
if len(_UpperCAmelCase) == 0:
return None
return "_and_".join(_UpperCAmelCase)
def __magic_name__ ( ):
    """Parse the main ``__init__.py`` and return a dict mapping each backend
    name to the list of objects imported under its availability guard.

    NOTE(review): identifier scrambling has broken several names in this
    body — ``_UpperCAmelCase`` (presumably ``PATH_TO_DIFFUSERS`` / the
    current line / the objects list), ``find_backend`` (the previous
    function, renamed to ``__magic_name__``), and the locals
    ``line_index``/``lines``/``backend``/``objects``/
    ``backend_specific_objects`` are read but never bound.  Verify against
    the original utils/check_dummies.py before use.
    """
    with open(os.path.join(_UpperCAmelCase , "__init__.py") , "r" , encoding="utf-8" , newline="\n") as f:
        lowerCamelCase_ : Dict = f.readlines()

    # Get to the point we do the actual imports for type checking
    lowerCamelCase_ : str = 0
    lowerCamelCase_ : Any = {}
    # Go through the end of the file
    while line_index < len(_UpperCAmelCase):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        lowerCamelCase_ : Optional[int] = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            lowerCamelCase_ : List[Any] = []
            # Until we unindent, add backend objects to the list
            while line_index < len(_UpperCAmelCase) and len(lines[line_index]) > 1:
                lowerCamelCase_ : List[Any] = lines[line_index]
                lowerCamelCase_ : List[str] = _re_single_line_import.search(_UpperCAmelCase)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(_UpperCAmelCase) > 0:
                lowerCamelCase_ : Any = objects
            else:
                line_index += 1
    return backend_specific_objects
def __magic_name__ ( name , backend_name ):
    """Render the dummy-object source for ``name`` guarded by ``backend_name``.

    Upper-case names are rendered as constants, lower-case names as
    functions, and anything else as a class.

    Fix: the original declared the same name for both parameters (a
    SyntaxError) and referenced ``name`` / format arguments that were never
    bound.
    """
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name)
    else:
        return DUMMY_CLASS.format(name , backend_name)
def __magic_name__ ( lowerCAmelCase_=None):
    """Build the content of each ``dummy_xxx_objects.py`` file, keyed by
    backend name.

    NOTE(review): scrambling has broken this body — ``read_init`` and
    ``create_dummy_object`` are the (renamed) functions above, the loop
    variable ``o`` was dropped from the ``create_dummy_object`` call at the
    bottom, and the per-backend file text overwrites a single local instead
    of filling the result dict that should be returned.  Verify against the
    original utils/check_dummies.py before use.
    """
    if backend_specific_objects is None:
        lowerCamelCase_ : int = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    lowerCamelCase_ : int = {}

    for backend, objects in backend_specific_objects.items():
        lowerCamelCase_ : List[Any] = "[" + ", ".join(F"""\"{b}\"""" for b in backend.split("_and_")) + "]"
        lowerCamelCase_ : str = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(_UpperCAmelCase , _UpperCAmelCase) for o in objects])
        lowerCamelCase_ : Any = dummy_file

    return dummy_files
def __magic_name__ ( lowerCAmelCase_=False):
    """Compare the generated dummy files against those on disk; rewrite them
    when the flag is set, otherwise raise ``ValueError`` on a mismatch.

    NOTE(review): scrambling has broken several names in this body —
    ``create_dummy_files`` (renamed above), ``_UpperCAmelCase`` (presumably
    ``PATH_TO_DIFFUSERS`` / the current path / backend), and the locals
    ``short_names``/``dummy_files``/``actual_dummies``/``overwrite`` are read
    but never bound.  Verify against the original utils/check_dummies.py.
    """
    lowerCamelCase_ : List[str] = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    lowerCamelCase_ : Dict = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    lowerCamelCase_ : Optional[int] = os.path.join(_UpperCAmelCase , "utils")
    lowerCamelCase_ : Dict = {
        backend: os.path.join(_UpperCAmelCase , F"""dummy_{short_names.get(_UpperCAmelCase , _UpperCAmelCase)}_objects.py""")
        for backend in dummy_files.keys()
    }

    lowerCamelCase_ : Any = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(_UpperCAmelCase):
            with open(_UpperCAmelCase , "r" , encoding="utf-8" , newline="\n") as f:
                lowerCamelCase_ : int = f.read()
        else:
            lowerCamelCase_ : str = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    F"""Updating diffusers.utils.dummy_{short_names.get(_UpperCAmelCase , _UpperCAmelCase)}_objects.py as the main """
                    "__init__ has new objects.")
                with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    F"""diffusers.utils.dummy_{short_names.get(_UpperCAmelCase , _UpperCAmelCase)}_objects.py. Run `make fix-copies` """
                    "to fix this.")
if __name__ == "__main__":
    # CLI entry point: ``--fix_and_overwrite`` rewrites the dummy files in
    # place instead of failing on a mismatch.
    # NOTE(review): the parser/args are bound to ``__magic_name__`` yet read
    # as ``parser``/``args``, and ``check_dummies`` is the (renamed) function
    # above — names scrambled; verify against the original script.
    __magic_name__ = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    __magic_name__ = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
| 716 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
__magic_name__ = logging.get_logger(__name__)

# Map of pretrained checkpoint names to their hosted config URLs.
# NOTE(review): rebinding ``__magic_name__`` here clobbers the logger above —
# these were presumably two distinct module-level names originally.
__magic_name__ = {
    '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
    '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration class for LUKE models.

    Stores the hyper-parameters that define the model architecture, following
    the standard ``PretrainedConfig`` pattern.

    Fixes applied: the original ``__init__`` declared every parameter as
    ``a_`` (duplicate argument names are a SyntaxError), never set the
    attributes on ``self``, and read ``classifier_dropout`` which was absent
    from the signature.
    """

    # Model-type identifier used by the auto classes.
    __UpperCAmelCase : List[Any] = '''luke'''

    def __init__(
        self,
        vocab_size=5_0267,
        entity_vocab_size=50_0000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Store all architecture hyper-parameters and forward the special
        token ids plus any extra kwargs to the base config."""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Whether to use LUKE's entity-aware self-attention mechanism.
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 73 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.