| code (string, 82-54.1k chars) | code_codestyle (int64, 0-699) | style_context (string, 111-35.6k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : int = logging.get_logger(__name__)
a_ : List[str] = {
'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class GitVisionConfig ( PretrainedConfig ):
model_type = '''git_vision_model'''
def __init__( self , a=768 , a=3072 , a=12 , a=12 , a=3 , a=224 , a=16 , a="quick_gelu" , a=1E-5 , a=0.0 , a=0.02 , **a , ) -> Optional[int]:
super().__init__(**a)
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = hidden_act
@classmethod
def from_pretrained( cls , pretrained_model_name_or_path , **kwargs) -> "PretrainedConfig":
cls._set_token_in_kwargs(kwargs)
config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)
# get the vision config dict if we are loading from GITConfig
if config_dict.get('model_type') == "git":
config_dict = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(config_dict , **kwargs)
class GitConfig ( PretrainedConfig ):
model_type = '''git'''
def __init__( self , a=None , a=3_0522 , a=768 , a=6 , a=12 , a=3072 , a="gelu" , a=0.1 , a=0.1 , a=1024 , a=0.02 , a=1E-12 , a=0 , a="absolute" , a=True , a=False , a=101 , a=102 , a=None , **a , ) -> Dict:
super().__init__(bos_token_id=a , eos_token_id=a , pad_token_id=a , **a)
if vision_config is None:
SCREAMING_SNAKE_CASE = {}
logger.info('vision_config is None. initializing the GitVisionConfig with default values.')
SCREAMING_SNAKE_CASE = GitVisionConfig(**a)
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = tie_word_embeddings
SCREAMING_SNAKE_CASE = num_image_with_embedding
SCREAMING_SNAKE_CASE = bos_token_id
SCREAMING_SNAKE_CASE = eos_token_id
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__)
SCREAMING_SNAKE_CASE = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
| 73 |
from __future__ import annotations
def lowerCamelCase__ (n):
# Trial division: emit each prime factor of ``n`` in non-decreasing order.
i = 2
factors = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i)
if n > 1:
factors.append(n)
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
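# Usage sketch (hedged; relies on the variable-name fixes applied above):
# lowerCamelCase__(360) -> [2, 2, 2, 3, 3, 5]; lowerCamelCase__(97) -> [97]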
| 73 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
a_ : Tuple = logging.get_logger(__name__)
a_ : List[Any] = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class LongformerConfig ( PretrainedConfig ):
model_type = '''longformer'''
def __init__( self , a = 512 , a = 2 , a = 1 , a = 0 , a = 2 , a = 3_0522 , a = 768 , a = 12 , a = 12 , a = 3072 , a = "gelu" , a = 0.1 , a = 0.1 , a = 512 , a = 2 , a = 0.02 , a = 1E-12 , a = False , **a , ) -> Dict:
super().__init__(pad_token_id=a , **a)
SCREAMING_SNAKE_CASE = attention_window
SCREAMING_SNAKE_CASE = sep_token_id
SCREAMING_SNAKE_CASE = bos_token_id
SCREAMING_SNAKE_CASE = eos_token_id
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = onnx_export
class LongformerOnnxConfig ( OnnxConfig ):
def __init__( self , config , task = "default" , patching_specs = None) -> None:
super().__init__(config , task , patching_specs)
config.onnx_export = True
@property
def inputs( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
])
@property
def outputs( self) -> Mapping[str, Mapping[int, str]]:
outputs = super().outputs
if self.task == "default":
outputs['pooler_output'] = {0: 'batch'}
return outputs
@property
def atol_for_validation( self) -> float:
return 1E-4
@property
def default_onnx_opset( self) -> int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14)
def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
inputs = super().generate_dummy_inputs(
preprocessor=tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework)
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
inputs['global_attention_mask'] = torch.zeros_like(inputs['input_ids'])
# make every second token global
inputs['global_attention_mask'][:, ::2] = 1
return inputs
| 73 |
import math
import os
import sys
def read_file_binary(file_path):
# Read the whole file and render it as one string of '0'/'1' characters.
result = ''
try:
with open(file_path , 'rb') as binary_file:
data = binary_file.read()
for dat in data:
curr_byte = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible')
sys.exit()
def add_key_to_lexicon(lexicon , curr_string , index , last_match_id):
# Replace the matched key with its two one-bit extensions; once the index
# reaches a power of two, every code word needs one more leading bit.
lexicon.pop(curr_string)
lexicon[curr_string + '0'] = last_match_id
if math.log2(index).is_integer():
for curr_key in lexicon:
lexicon[curr_key] = '0' + lexicon[curr_key]
lexicon[curr_string + '1'] = bin(index)[2:]
def compress_data(data_bits):
lexicon = {'0': '0', '1': '1'}
result, curr_string = '', ''
index = len(lexicon)
for i in range(len(data_bits)):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
last_match_id = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(lexicon , curr_string , index , last_match_id)
index += 1
curr_string = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
last_match_id = lexicon[curr_string]
result += last_match_id
return result
def add_file_length(source_path , compressed):
# Prefix the stream with the source length in a self-delimiting binary form.
file_length = os.path.getsize(source_path)
file_length_binary = bin(file_length)[2:]
length_length = len(file_length_binary)
return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path , to_write):
byte_length = 8
try:
with open(file_path , 'wb') as opened_file:
result_byte_array = [
to_write[i : i + byte_length]
for i in range(0 , len(to_write) , byte_length)
]
if len(result_byte_array[-1]) % byte_length == 0:
result_byte_array.append('10000000')
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1]) - 1
)
for elem in result_byte_array:
opened_file.write(int(elem , 2).to_bytes(1 , byteorder='big'))
except OSError:
print('File not accessible')
sys.exit()
def compress(source_path , destination_path):
data_bits = read_file_binary(source_path)
compressed = compress_data(data_bits)
compressed = add_file_length(source_path , compressed)
write_file_binary(destination_path , compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
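# Usage sketch (hedged; names as restored above): a run renders the source
# file as a bit string, LZW-compresses it, prepends the original file
# length, and packs the result back into bytes:
# python lempel_ziv.py source.bin destination.lz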
| 73 | 1 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class _snake_case ( A__ , A__ , unittest.TestCase ):
_lowercase : List[Any] = IFPipeline
_lowercase : List[Any] = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
_lowercase : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
_lowercase : List[Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
return self._get_dummy_components()
def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> List[Any]:
if str(a).startswith('mps'):
SCREAMING_SNAKE_CASE = torch.manual_seed(a)
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
SCREAMING_SNAKE_CASE = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
self._test_save_load_local()
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self) -> str:
# if
SCREAMING_SNAKE_CASE = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.float16)
SCREAMING_SNAKE_CASE = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.float16 , text_encoder=None , tokenizer=None)
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda')
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = pipe_a.encode_prompt('anime turtle' , device='cuda')
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if(a , a , a , a)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE = IFImgaImgPipeline(**pipe_a.components)
SCREAMING_SNAKE_CASE = IFImgaImgSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_imgaimg(a , a , a , a)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE = IFInpaintingPipeline(**pipe_a.components)
SCREAMING_SNAKE_CASE = IFInpaintingSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_inpainting(a , a , a , a)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a) -> str:
# pipeline 1
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE = torch.Generator(device='cpu').manual_seed(0)
SCREAMING_SNAKE_CASE = pipe_a(
prompt_embeds=a , negative_prompt_embeds=a , num_inference_steps=2 , generator=a , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy')
assert_mean_pixel_difference(a , a)
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE = torch.Generator(device='cpu').manual_seed(0)
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(a)
SCREAMING_SNAKE_CASE = pipe_a(
prompt_embeds=a , negative_prompt_embeds=a , image=a , generator=a , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy')
assert_mean_pixel_difference(a , a)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(a)
SCREAMING_SNAKE_CASE = torch.Generator(device='cpu').manual_seed(0)
SCREAMING_SNAKE_CASE = pipe_a(
prompt_embeds=a , negative_prompt_embeds=a , image=a , num_inference_steps=2 , generator=a , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy')
assert_mean_pixel_difference(a , a)
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE = torch.Generator(device='cpu').manual_seed(0)
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(a)
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(a)
SCREAMING_SNAKE_CASE = pipe_a(
prompt_embeds=a , negative_prompt_embeds=a , image=a , original_image=a , generator=a , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy')
assert_mean_pixel_difference(a , a)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a) -> Any:
# pipeline 1
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(a)
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(a)
SCREAMING_SNAKE_CASE = torch.Generator(device='cpu').manual_seed(0)
SCREAMING_SNAKE_CASE = pipe_a(
prompt_embeds=a , negative_prompt_embeds=a , image=a , mask_image=a , num_inference_steps=2 , generator=a , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy')
assert_mean_pixel_difference(a , a)
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE = torch.Generator(device='cpu').manual_seed(0)
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(a)
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(a)
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(a)
SCREAMING_SNAKE_CASE = pipe_a(
prompt_embeds=a , negative_prompt_embeds=a , image=a , mask_image=a , original_image=a , generator=a , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy')
assert_mean_pixel_difference(a , a)
def lowerCamelCase__ ():
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 73 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
return 1.0 / (1.0 + np.exp(-_outputs))
def softmax(_outputs):
maxes = np.max(_outputs , axis=-1 , keepdims=True)
shifted_exp = np.exp(_outputs - maxes)
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True)
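# Hedged numeric check of the stable softmax above: subtracting the row max
# before exponentiating does not change the result, it only avoids overflow.
# softmax(np.array([[1.0, 2.0, 3.0]])) -> approx [[0.0900, 0.2447, 0.6652]]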
class ClassificationFunction ( ExplicitEnum ):
SIGMOID = '''sigmoid'''
SOFTMAX = '''softmax'''
NONE = '''none'''
@add_end_docstrings(
A__ , R'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class TextClassificationPipeline ( Pipeline ):
return_all_scores = False
function_to_apply = ClassificationFunction.NONE
def __init__( self , **a) -> Optional[Any]:
super().__init__(**a)
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)
def _sanitize_parameters( self , return_all_scores=None , function_to_apply=None , top_k="" , **tokenizer_kwargs) -> Tuple:
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
preprocess_params = tokenizer_kwargs
postprocess_params = {}
if hasattr(self.model.config , 'return_all_scores') and return_all_scores is None:
return_all_scores = self.model.config.return_all_scores
if isinstance(top_k , int) or top_k is None:
postprocess_params['top_k'] = top_k
postprocess_params['_legacy'] = False
elif return_all_scores is not None:
warnings.warn(
'`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'
' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , UserWarning , )
if return_all_scores:
postprocess_params['top_k'] = None
else:
postprocess_params['top_k'] = 1
if isinstance(function_to_apply , str):
function_to_apply = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
postprocess_params['function_to_apply'] = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *args , **kwargs) -> Optional[int]:
result = super().__call__(*args , **kwargs)
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
_legacy = 'top_k' not in kwargs
if isinstance(args[0] , str) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def preprocess( self , inputs , **tokenizer_kwargs) -> Dict[str, GenericTensor]:
return_tensors = self.framework
if isinstance(inputs , dict):
return self.tokenizer(**inputs , return_tensors=return_tensors , **tokenizer_kwargs)
elif isinstance(inputs , list) and len(inputs) == 1 and isinstance(inputs[0] , list) and len(inputs[0]) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=return_tensors , **tokenizer_kwargs)
elif isinstance(inputs , list):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.')
return self.tokenizer(inputs , return_tensors=return_tensors , **tokenizer_kwargs)
def _forward( self , model_inputs) -> Optional[Any]:
return self.model(**model_inputs)
def postprocess( self , model_outputs , function_to_apply=None , top_k=1 , _legacy=True) -> Any:
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
function_to_apply = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
function_to_apply = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , 'function_to_apply') and function_to_apply is None:
function_to_apply = self.model.config.function_to_apply
else:
function_to_apply = ClassificationFunction.NONE
outputs = model_outputs['logits'][0]
outputs = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
scores = sigmoid(outputs)
elif function_to_apply == ClassificationFunction.SOFTMAX:
scores = softmax(outputs)
elif function_to_apply == ClassificationFunction.NONE:
scores = outputs
else:
raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''')
if top_k == 1 and _legacy:
return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
dict_scores = [
{'label': self.model.config.id2label[i], 'score': score.item()} for i, score in enumerate(scores)
]
if not _legacy:
dict_scores.sort(key=lambda x: x["score"] , reverse=True)
if top_k is not None:
dict_scores = dict_scores[:top_k]
return dict_scores
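# Usage sketch (hedged; assumes the renames above): the pipeline applies
# softmax for multi-class heads and sigmoid for multi-label heads.
# from transformers import pipeline
# classifier = pipeline('text-classification')
# classifier('This restaurant is awesome')  # -> [{'label': 'POSITIVE', 'score': 0.99...}]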
| 73 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
MAX_MODEL_INPUT_SIZES = {
'facebook/s2t-small-librispeech-asr': 1024,
}
MUSTC_LANGS = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
LANGUAGES = {'mustc': MUSTC_LANGS}
class Speech2TextTokenizer ( PreTrainedTokenizer ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = MAX_MODEL_INPUT_SIZES
model_input_names = ['''input_ids''', '''attention_mask''']
prefix_tokens: List[int] = []
def __init__( self , a , a , a="<s>" , a="</s>" , a="<pad>" , a="<unk>" , a=False , a=False , a=None , a=None , a = None , **a , ) -> None:
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a , eos_token=a , unk_token=a , pad_token=a , do_upper_case=a , do_lower_case=a , tgt_lang=a , lang_codes=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
SCREAMING_SNAKE_CASE = do_upper_case
SCREAMING_SNAKE_CASE = do_lower_case
SCREAMING_SNAKE_CASE = load_json(a)
SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE = spm_file
SCREAMING_SNAKE_CASE = load_spm(a , self.sp_model_kwargs)
if lang_codes is not None:
SCREAMING_SNAKE_CASE = lang_codes
SCREAMING_SNAKE_CASE = LANGUAGES[lang_codes]
SCREAMING_SNAKE_CASE = [f'''<lang:{lang}>''' for lang in self.langs]
SCREAMING_SNAKE_CASE = {lang: self.sp_model.PieceToId(f'''<lang:{lang}>''') for lang in self.langs}
SCREAMING_SNAKE_CASE = self.lang_tokens
SCREAMING_SNAKE_CASE = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang)
else:
SCREAMING_SNAKE_CASE = {}
@property
def SCREAMING_SNAKE_CASE__ ( self) -> int:
return len(self.encoder)
@property
def SCREAMING_SNAKE_CASE__ ( self) -> str:
return self._tgt_lang
@tgt_lang.setter
def SCREAMING_SNAKE_CASE__ ( self , a) -> None:
SCREAMING_SNAKE_CASE = new_tgt_lang
self.set_tgt_lang_special_tokens(a)
def SCREAMING_SNAKE_CASE__ ( self , a) -> None:
SCREAMING_SNAKE_CASE = self.lang_code_to_id[tgt_lang]
SCREAMING_SNAKE_CASE = [lang_code_id]
def SCREAMING_SNAKE_CASE__ ( self , a) -> List[str]:
return self.sp_model.encode(a , out_type=a)
def SCREAMING_SNAKE_CASE__ ( self , a) -> Tuple:
return self.encoder.get(a , self.encoder[self.unk_token])
def SCREAMING_SNAKE_CASE__ ( self , a) -> str:
return self.decoder.get(a , self.unk_token)
def SCREAMING_SNAKE_CASE__ ( self , a) -> str:
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
SCREAMING_SNAKE_CASE = self.sp_model.decode(a)
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
SCREAMING_SNAKE_CASE = []
else:
current_sub_tokens.append(a)
SCREAMING_SNAKE_CASE = self.sp_model.decode(a)
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def SCREAMING_SNAKE_CASE__ ( self , a , a=None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self , a , a = None , a = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a)
SCREAMING_SNAKE_CASE = [1] * len(self.prefix_tokens)
SCREAMING_SNAKE_CASE = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(a)) + suffix_ones
return prefix_ones + ([0] * len(a)) + ([0] * len(a)) + suffix_ones
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = self.encoder.copy()
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Dict:
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self , a) -> None:
SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = load_spm(self.spm_file , self.sp_model_kwargs)
def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> Tuple[str]:
SCREAMING_SNAKE_CASE = Path(a)
assert save_dir.is_dir(), f'''{save_directory} should be a directory'''
SCREAMING_SNAKE_CASE = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
SCREAMING_SNAKE_CASE = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , a)
if os.path.abspath(self.spm_file) != os.path.abspath(a) and os.path.isfile(self.spm_file):
copyfile(self.spm_file , a)
elif not os.path.isfile(self.spm_file):
with open(a , 'wb') as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(a)
return (str(a), str(a))
def load_spm(path , sp_model_kwargs):
spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
spm.Load(str(path))
return spm
def load_json(path):
with open(path , 'r') as f:
return json.load(f)
def save_json(data , path):
with open(path , 'w') as f:
json.dump(data , f , indent=2)
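# Hedged round-trip sketch for the JSON helpers above:
# save_json({'<pad>': 0, '<s>': 1}, 'vocab.json')
# assert load_json('vocab.json') == {'<pad>': 0, '<s>': 1}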
| 73 |
import heapq as hq
import math
from collections.abc import Iterator
class Vertex :
def __init__( self , id_) -> None:
self.id = str(id_)
self.key = None
self.pi = None
self.neighbors = []
self.edges = {} # {vertex:distance}
def __lt__( self , other) -> bool:
return self.key < other.key
def __repr__( self) -> str:
return self.id
def add_neighbor( self , vertex) -> None:
self.neighbors.append(vertex)
def add_edge( self , vertex , weight) -> None:
self.edges[vertex.id] = weight
def connect(graph , a , b , edge):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1])
graph[b - 1].add_neighbor(graph[a - 1])
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , edge)
graph[b - 1].add_edge(graph[a - 1] , edge)
def prim(graph , root):
a = []
for u in graph:
u.key = math.inf
u.pi = None
root.key = 0
q = graph[:]
while q:
u = min(q)
q.remove(u)
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
v.pi = u
v.key = u.edges[v.id]
for i in range(1 , len(graph)):
a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
return a
def prim_heap(graph , root) -> Iterator[tuple]:
for u in graph:
u.key = math.inf
u.pi = None
root.key = 0
h = list(graph)
hq.heapify(h)
while h:
u = hq.heappop(h)
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
v.pi = u
v.key = u.edges[v.id]
hq.heapify(h)
for i in range(1 , len(graph)):
yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def lowerCamelCase__ ():
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
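# Usage sketch (hedged; assumes the restored names above): a weighted
# triangle graph whose MST drops the heaviest edge.
# graph = [Vertex(i) for i in range(3)]
# connect(graph, 1, 2, 1); connect(graph, 2, 3, 2); connect(graph, 1, 3, 4)
# prim(graph, graph[0])  # -> [(2, 1), (3, 2)]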
| 73 | 1 |
import re
def lowerCamelCase__ (dna):
# Every character must be one of A/T/C/G for a valid strand.
if len(re.findall('[ATCG]' , dna)) != len(dna):
raise ValueError('Invalid Strand')
return dna.translate(dna.maketrans('ATCG' , 'TAGC'))
if __name__ == "__main__":
import doctest
doctest.testmod()
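# Usage sketch (hedged): Watson-Crick complement of a strand, e.g.
# lowerCamelCase__('ATGC') -> 'TACG'; invalid bases raise ValueError.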
| 73 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_mask2former'] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mask2former import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
Mask2FormerForUniversalSegmentation,
Mask2FormerModel,
Mask2FormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
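# Hedged usage note: with the lazy module registered in sys.modules, a plain
# `from transformers import Mask2FormerConfig` only materializes the
# submodule on first attribute access.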
| 73 | 1 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader ( yaml.SafeLoader ):
def _check_no_duplicates_on_constructed_node( self , node) -> None:
keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
keys = [tuple(key) if isinstance(key , list) else key for key in keys]
counter = Counter(keys)
duplicate_keys = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''')
def construct_mapping( self , node , deep=False) -> dict:
mapping = super().construct_mapping(node , deep=deep)
self._check_no_duplicates_on_constructed_node(node)
return mapping
def _split_yaml_from_readme(readme_content):
full_content = list(readme_content.splitlines())
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
sep_idx = full_content[1:].index('---') + 1
yamlblock = '\n'.join(full_content[1:sep_idx])
return yamlblock, "\n".join(full_content[sep_idx + 1 :])
return None, "\n".join(full_content)
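# Hedged check for the splitter above:
# _split_yaml_from_readme('---\nlicense: mit\n---\nBody') == ('license: mit', 'Body')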
class DatasetMetadata ( dict ):
# class attributes
_FIELDS_WITH_DASHES = {'''train_eval_index'''} # train-eval-index in the YAML metadata
@classmethod
def from_readme( cls , path) -> "DatasetMetadata":
with open(path , encoding='utf-8') as readme_file:
yaml_string , _ = _split_yaml_from_readme(readme_file.read())
if yaml_string is not None:
return cls.from_yaml_string(yaml_string)
else:
return cls()
def to_readme( self , path) -> None:
if path.exists():
with open(path , encoding='utf-8') as readme_file:
readme_content = readme_file.read()
else:
readme_content = None
updated_readme_content = self._to_readme(readme_content)
with open(path , 'w' , encoding='utf-8') as readme_file:
readme_file.write(updated_readme_content)
def _to_readme( self , readme_content = None) -> str:
if readme_content is not None:
_ , content = _split_yaml_from_readme(readme_content)
full_content = '---\n' + self.to_yaml_string() + '---\n' + content
else:
full_content = '---\n' + self.to_yaml_string() + '---\n'
return full_content
@classmethod
def from_yaml_string( cls , string) -> "DatasetMetadata":
metadata_dict = yaml.load(string , Loader=_NoDuplicateSafeLoader) or {}
# Convert the YAML keys to DatasetMetadata fields
metadata_dict = {
(key.replace('-' , '_') if key.replace('-' , '_') in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**metadata_dict)
def to_yaml_string( self) -> str:
return yaml.safe_dump(
{
(key.replace('_' , '-') if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=False , allow_unicode=True , encoding='utf-8' , ).decode('utf-8')
a_ : Optional[int] = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
ap = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
ap.add_argument('readme_filepath')
args = ap.parse_args()
readme_filepath = Path(args.readme_filepath)
dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 73 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
a_ : Union[str, Any] = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig ( PretrainedConfig ):
model_type = '''decision_transformer'''
keys_to_ignore_at_inference = ['''past_key_values''']
attribute_map = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , a=17 , a=4 , a=128 , a=4096 , a=True , a=1 , a=1024 , a=3 , a=1 , a=None , a="relu" , a=0.1 , a=0.1 , a=0.1 , a=1E-5 , a=0.02 , a=True , a=True , a=5_0256 , a=5_0256 , a=False , a=False , **a , ) -> List[str]:
SCREAMING_SNAKE_CASE = state_dim
SCREAMING_SNAKE_CASE = act_dim
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = max_ep_len
SCREAMING_SNAKE_CASE = action_tanh
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = n_positions
SCREAMING_SNAKE_CASE = n_layer
SCREAMING_SNAKE_CASE = n_head
SCREAMING_SNAKE_CASE = n_inner
SCREAMING_SNAKE_CASE = activation_function
SCREAMING_SNAKE_CASE = resid_pdrop
SCREAMING_SNAKE_CASE = embd_pdrop
SCREAMING_SNAKE_CASE = attn_pdrop
SCREAMING_SNAKE_CASE = layer_norm_epsilon
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scale_attn_weights
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE = bos_token_id
SCREAMING_SNAKE_CASE = eos_token_id
super().__init__(bos_token_id=a , eos_token_id=a , **a)
| 73 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _snake_case ( A__ ):
@staticmethod
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( a) -> Union[str, Any]:
raise NotImplementedError()
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
raise NotImplementedError()
| 73 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator , batch_size = 16):
tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
datasets = load_dataset('glue' , 'mrpc')
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels')
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size , drop_last=True)
eval_dataloader = DataLoader(
tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE , drop_last=(accelerator.mixed_precision == 'fp8') , )
return train_dataloader, eval_dataloader
def training_function(config , args):
# Initialize accelerator
accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config['lr']
num_epochs = int(config['num_epochs'])
seed = int(config['seed'])
batch_size = int(config['batch_size'])
metric = evaluate.load('glue' , 'mrpc')
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE
set_seed(seed)
train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Instantiate optimizer
optimizer = AdamW(params=model.parameters() , lr=lr)
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
model , optimizer , train_dataloader , eval_dataloader , lr_scheduler)
# Now we train the model
for epoch in range(num_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
outputs = model(**batch)
loss = outputs.loss
loss = loss / gradient_accumulation_steps
accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions , references = accelerator.gather_for_metrics((predictions, batch['labels']))
metric.add_batch(
predictions=predictions , references=references , )
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , eval_metric)
def main():
parser = argparse.ArgumentParser(description='Simple example of training script.')
parser.add_argument(
'--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.')
args = parser.parse_args()
config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(config , args)
if __name__ == "__main__":
main()
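# Usage sketch (hedged; the script filename is hypothetical): the standard
# accelerate launch flow for this kind of training script is
# accelerate config                                   # answer the hardware questions once
# accelerate launch nlp_example.py --mixed_precision fp16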
| 73 | 1 |
class _snake_case :
def __init__( self , a) -> None:
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = [0] * size
SCREAMING_SNAKE_CASE = [0] * size
@staticmethod
def SCREAMING_SNAKE_CASE__ ( a) -> int:
return index | (index + 1)
@staticmethod
def SCREAMING_SNAKE_CASE__ ( a) -> int:
return (index & (index + 1)) - 1
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> None:
SCREAMING_SNAKE_CASE = value
while index < self.size:
SCREAMING_SNAKE_CASE = self.get_prev(a) + 1
if current_left_border == index:
SCREAMING_SNAKE_CASE = value
else:
SCREAMING_SNAKE_CASE = max(a , a , a)
SCREAMING_SNAKE_CASE = self.get_next(a)
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> int:
right -= 1 # Because of right is exclusive
SCREAMING_SNAKE_CASE = 0
while left <= right:
SCREAMING_SNAKE_CASE = self.get_prev(a)
if left <= current_left:
SCREAMING_SNAKE_CASE = max(a , self.tree[right])
SCREAMING_SNAKE_CASE = current_left
else:
SCREAMING_SNAKE_CASE = max(a , self.arr[right])
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_rag'] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_rag'] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 73 | 1 |
import sys
import turtle
def get_mid(p1 , p2):
# midpoint of two 2-D points
return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2
def triangle(vertex1 , vertex2 , vertex3 , depth , ):
# draw the current triangle, then recurse into the three corner triangles
my_pen.up()
my_pen.goto(vertex1[0] , vertex1[1])
my_pen.down()
my_pen.goto(vertex2[0] , vertex2[1])
my_pen.goto(vertex3[0] , vertex3[1])
my_pen.goto(vertex1[0] , vertex1[1])
if depth == 0:
return
triangle(vertex1 , get_mid(vertex1 , vertex2) , get_mid(vertex1 , vertex3) , depth - 1)
triangle(vertex2 , get_mid(vertex1 , vertex2) , get_mid(vertex2 , vertex3) , depth - 1)
triangle(vertex3 , get_mid(vertex3 , vertex2) , get_mid(vertex1 , vertex3) , depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
vertices = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
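# Usage sketch (hedged): the single argument controls the recursion depth,
# e.g. `python fractals.py 4` draws a depth-4 Sierpinski triangle.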
| 73 |
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude , angle , radian_mode = False):
# Convert a (magnitude, angle) force into its [x, y] components.
if radian_mode:
return [magnitude * cos(angle), magnitude * sin(angle)]
return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def in_static_equilibrium(forces , location , eps = 10**-1):
# The net moment of all forces about the origin must (almost) vanish.
moments = cross(location , forces)
sum_moments = sum(moments)
return abs(sum_moments) < eps
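# Hedged numeric check: polar_force(10, 90) -> approx [0.0, 10.0], since the
# force points straight up; cos(90 deg) ~ 0 and sin(90 deg) = 1.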
if __name__ == "__main__":
# Test to check if it works
forces = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
forces = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 73 | 1 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester :
def __init__( self , a , a = 13 , a = 64 , a = 2 , a = 3 , a = 3 , a = True , a = True , a = 128 , a=[16, 32, 64, 128] , a = 7 , a = 4 , a = 37 , a = "gelu" , a = 0.1 , a = 0.1 , a = 10 , a = 0.02 , a = 2 , a = 1 , a = 128 , a = [2, 2, 2, 2] , a = 2 , a = 2 , ) -> str:
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = encoder_stride
SCREAMING_SNAKE_CASE = num_attention_outputs
SCREAMING_SNAKE_CASE = embed_dim
SCREAMING_SNAKE_CASE = embed_dim + 1
SCREAMING_SNAKE_CASE = resolution
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = dim
SCREAMING_SNAKE_CASE = mlp_expansion_ratio
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> Optional[int]:
SCREAMING_SNAKE_CASE = TFEfficientFormerModel(config=a)
SCREAMING_SNAKE_CASE = model(a , training=a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> int:
SCREAMING_SNAKE_CASE = self.type_sequence_label_size
SCREAMING_SNAKE_CASE = TFEfficientFormerForImageClassification(a)
SCREAMING_SNAKE_CASE = model(a , labels=a , training=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = TFEfficientFormerForImageClassification(a)
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE = model(a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class _snake_case ( A__ , A__ , unittest.TestCase ):
_lowercase : Dict = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_lowercase : List[str] = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_lowercase : Optional[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Optional[int] = False
_lowercase : str = False
_lowercase : Optional[int] = False
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = TFEfficientFormerModelTester(self)
SCREAMING_SNAKE_CASE = ConfigTester(
self , config_class=a , has_text_modality=a , hidden_size=37)
def SCREAMING_SNAKE_CASE__ ( self) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds')
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings')
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(a)
SCREAMING_SNAKE_CASE = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ['pixel_values']
self.assertListEqual(arg_names[:1] , a)
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
def check_hidden_states_output(a , a , a):
SCREAMING_SNAKE_CASE = model_class(a)
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(a , a) , training=a)
SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(a) , a)
if hasattr(self.model_tester , 'encoder_seq_length'):
SCREAMING_SNAKE_CASE = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length') and self.model_tester.chunk_length > 1:
SCREAMING_SNAKE_CASE = seq_length * self.model_tester.chunk_length
else:
SCREAMING_SNAKE_CASE = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
SCREAMING_SNAKE_CASE = outputs.decoder_hidden_states
self.assertIsInstance(a , (list, tuple))
self.assertEqual(len(a) , a)
SCREAMING_SNAKE_CASE = getattr(self.model_tester , 'seq_length' , a)
SCREAMING_SNAKE_CASE = getattr(self.model_tester , 'decoder_seq_length' , a)
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(a , a , a)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(a , a , a)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a=False) -> Optional[Any]:
SCREAMING_SNAKE_CASE = super()._prepare_for_class(a , a , return_labels=a)
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a)
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet')
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a)
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = TFEfficientFormerModel.from_pretrained(a)
self.assertIsNotNone(a)
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = getattr(self.model_tester , 'seq_length' , a)
SCREAMING_SNAKE_CASE = getattr(self.model_tester , 'encoder_seq_length' , a)
SCREAMING_SNAKE_CASE = getattr(self.model_tester , 'key_length' , a)
SCREAMING_SNAKE_CASE = getattr(self.model_tester , 'chunk_length' , a)
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes'):
SCREAMING_SNAKE_CASE = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(a)
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(a , a) , training=a)
SCREAMING_SNAKE_CASE = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(a) , self.model_tester.num_attention_outputs)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(a)
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(a , a) , training=a)
SCREAMING_SNAKE_CASE = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(a) , self.model_tester.num_attention_outputs)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
SCREAMING_SNAKE_CASE = model_class(a)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
SCREAMING_SNAKE_CASE = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=a)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
SCREAMING_SNAKE_CASE = model(a)
self.assertTrue(outputs_dict is not None)
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_tf
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300')
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300')
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=a , return_tensors='tf')
# forward pass
SCREAMING_SNAKE_CASE = model(**a , training=a)
# verify the logits
SCREAMING_SNAKE_CASE = tf.TensorShape((1, 1000))
self.assertEqual(outputs.logits.shape , a)
SCREAMING_SNAKE_CASE = tf.constant([-0.05_55, 0.48_25, -0.08_52])
self.assertTrue(np.allclose(outputs.logits[0, :3] , a , atol=1E-4))
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300')
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=a , return_tensors='tf')
# forward pass
SCREAMING_SNAKE_CASE = model(**a , training=a)
# verify the logits
SCREAMING_SNAKE_CASE = tf.TensorShape((1, 1000))
self.assertEqual(outputs.logits.shape , a)
SCREAMING_SNAKE_CASE = tf.constant([-0.13_12, 0.43_53, -1.04_99])
self.assertTrue(np.allclose(outputs.logits[0, :3] , a , atol=1E-4))
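# Aside (a minimal sketch, not part of the original test file): both integration tests above
# follow the same preprocess -> forward -> check-logits recipe. Stripped of the assertion
# scaffolding, inference looks like this; `_efficientformer_predict` is a hypothetical name
# introduced only for illustration, while the method calls are the ones used in the tests.
def _efficientformer_predict(model, image_processor, image):
    inputs = image_processor(images=image, return_tensors='tf')
    # training=False matters: as noted in the tests, this TF port behaves differently in train mode
    logits = model(**inputs, training=False).logits  # shape (1, 1000) for the ImageNet head above
    return int(tf.math.argmax(logits, axis=-1)[0])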
| 73 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Optional[int] = logging.get_logger(__name__)
a_ : int = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class _snake_case ( A__ ):
_lowercase : Dict = '''cvt'''
def __init__( self , a=3 , a=[7, 3, 3] , a=[4, 2, 2] , a=[2, 1, 1] , a=[64, 192, 384] , a=[1, 3, 6] , a=[1, 2, 10] , a=[4.0, 4.0, 4.0] , a=[0.0, 0.0, 0.0] , a=[0.0, 0.0, 0.0] , a=[0.0, 0.0, 0.1] , a=[True, True, True] , a=[False, False, True] , a=["dw_bn", "dw_bn", "dw_bn"] , a=[3, 3, 3] , a=[1, 1, 1] , a=[2, 2, 2] , a=[1, 1, 1] , a=[1, 1, 1] , a=0.02 , a=1E-12 , **a , ) -> List[Any]:
super().__init__(**a)
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_sizes
SCREAMING_SNAKE_CASE = patch_stride
SCREAMING_SNAKE_CASE = patch_padding
SCREAMING_SNAKE_CASE = embed_dim
SCREAMING_SNAKE_CASE = num_heads
SCREAMING_SNAKE_CASE = depth
SCREAMING_SNAKE_CASE = mlp_ratio
SCREAMING_SNAKE_CASE = attention_drop_rate
SCREAMING_SNAKE_CASE = drop_rate
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = qkv_bias
SCREAMING_SNAKE_CASE = cls_token
SCREAMING_SNAKE_CASE = qkv_projection_method
SCREAMING_SNAKE_CASE = kernel_qkv
SCREAMING_SNAKE_CASE = padding_kv
SCREAMING_SNAKE_CASE = stride_kv
SCREAMING_SNAKE_CASE = padding_q
SCREAMING_SNAKE_CASE = stride_q
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
| 73 | 1 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def lowerCamelCase__ (_UpperCAmelCase):
    return 1.0 / (1.0 + np.exp(-_UpperCAmelCase))
def lowerCamelCase__ (_UpperCAmelCase):
    SCREAMING_SNAKE_CASE = np.max(_UpperCAmelCase , axis=-1 , keepdims=True)
    SCREAMING_SNAKE_CASE = np.exp(_UpperCAmelCase - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True)
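# Aside (a minimal sketch, not part of the original module): the softmax helper above subtracts
# the row-wise max before exponentiating. That is the standard overflow guard: softmax(x) equals
# softmax(x - c) for any constant c, so the result is unchanged while np.exp never sees huge
# inputs. `_stable_softmax_demo` is a hypothetical name introduced only for this illustration.
def _stable_softmax_demo():
    logits = np.array([[1000.0, 1001.0, 1002.0]])  # naive np.exp(logits) would overflow to inf
    maxes = np.max(logits, axis=-1, keepdims=True)
    shifted_exp = np.exp(logits - maxes)
    probs = shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
    # probs is finite and sums to 1: approximately [[0.0900, 0.2447, 0.6652]]
    assert np.isfinite(probs).all() and np.isclose(probs.sum(), 1.0)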
class _snake_case ( A__ ):
_lowercase : Tuple = '''sigmoid'''
_lowercase : List[str] = '''softmax'''
_lowercase : Tuple = '''none'''
@add_end_docstrings(
A__ , R'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class _snake_case ( A__ ):
_lowercase : Optional[Any] = False
_lowercase : Tuple = ClassificationFunction.NONE
def __init__( self , **a) -> Optional[Any]:
super().__init__(**a)
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)
def SCREAMING_SNAKE_CASE__ ( self , a=None , a=None , a="" , **a) -> Tuple:
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
SCREAMING_SNAKE_CASE = tokenizer_kwargs
SCREAMING_SNAKE_CASE = {}
if hasattr(self.model.config , 'return_all_scores') and return_all_scores is None:
SCREAMING_SNAKE_CASE = self.model.config.return_all_scores
if isinstance(a , a) or top_k is None:
SCREAMING_SNAKE_CASE = top_k
SCREAMING_SNAKE_CASE = False
elif return_all_scores is not None:
warnings.warn(
'`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'
' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , a , )
if return_all_scores:
SCREAMING_SNAKE_CASE = None
else:
SCREAMING_SNAKE_CASE = 1
if isinstance(a , a):
SCREAMING_SNAKE_CASE = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
SCREAMING_SNAKE_CASE = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *a , **a) -> Optional[int]:
SCREAMING_SNAKE_CASE = super().__call__(*a , **a)
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
SCREAMING_SNAKE_CASE = 'top_k' not in kwargs
if isinstance(args[0] , a) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def SCREAMING_SNAKE_CASE__ ( self , a , **a) -> Dict[str, GenericTensor]:
SCREAMING_SNAKE_CASE = self.framework
if isinstance(a , a):
return self.tokenizer(**a , return_tensors=a , **a)
elif isinstance(a , a) and len(a) == 1 and isinstance(inputs[0] , a) and len(inputs[0]) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=a , **a)
elif isinstance(a , a):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.')
return self.tokenizer(a , return_tensors=a , **a)
def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[Any]:
return self.model(**a)
def SCREAMING_SNAKE_CASE__ ( self , a , a=None , a=1 , a=True) -> Any:
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
SCREAMING_SNAKE_CASE = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
SCREAMING_SNAKE_CASE = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , 'function_to_apply') and function_to_apply is None:
SCREAMING_SNAKE_CASE = self.model.config.function_to_apply
else:
SCREAMING_SNAKE_CASE = ClassificationFunction.NONE
SCREAMING_SNAKE_CASE = model_outputs['logits'][0]
SCREAMING_SNAKE_CASE = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
SCREAMING_SNAKE_CASE = sigmoid(a)
elif function_to_apply == ClassificationFunction.SOFTMAX:
SCREAMING_SNAKE_CASE = softmax(a)
elif function_to_apply == ClassificationFunction.NONE:
SCREAMING_SNAKE_CASE = outputs
else:
raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''')
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
SCREAMING_SNAKE_CASE = [
{'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(a)
]
if not _legacy:
dict_scores.sort(key=lambda x: x["score"] , reverse=True)
if top_k is not None:
SCREAMING_SNAKE_CASE = dict_scores[:top_k]
return dict_scores
| 73 |
def lowerCamelCase__ (_UpperCAmelCase = 10 , _UpperCAmelCase = 1000 , _UpperCAmelCase = True):
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase)
and isinstance(_UpperCAmelCase , _UpperCAmelCase)
and isinstance(_UpperCAmelCase , _UpperCAmelCase)
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError('Invalid value for min_val or max_val (min_value < max_value)')
return min_val if option else max_val
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
return int((number_a + number_a) / 2)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase) and isinstance(_UpperCAmelCase , _UpperCAmelCase) and isinstance(_UpperCAmelCase , _UpperCAmelCase)
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError('argument value for lower and higher must be (lower < higher)')
if not lower < to_guess < higher:
raise ValueError(
'guess value must be within the range of lower and higher value')
def answer(_UpperCAmelCase) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('started...')
SCREAMING_SNAKE_CASE = lower
SCREAMING_SNAKE_CASE = higher
SCREAMING_SNAKE_CASE = []
while True:
SCREAMING_SNAKE_CASE = get_avg(_UpperCAmelCase , _UpperCAmelCase)
last_numbers.append(_UpperCAmelCase)
if answer(_UpperCAmelCase) == "low":
SCREAMING_SNAKE_CASE = number
elif answer(_UpperCAmelCase) == "high":
SCREAMING_SNAKE_CASE = number
else:
break
print(F'''guess the number : {last_numbers[-1]}''')
print(F'''details : {last_numbers!s}''')
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = int(input('Enter lower value : ').strip())
SCREAMING_SNAKE_CASE = int(input('Enter high value : ').strip())
SCREAMING_SNAKE_CASE = int(input('Enter value to guess : ').strip())
guess_the_number(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
if __name__ == "__main__":
main()
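# Aside (a minimal sketch, not part of the original script): the guessing loop above is a plain
# binary search on the midpoint, so it needs at most about log2(higher - lower) guesses.
# `_count_guesses` is a hypothetical helper added only to make that bound concrete.
def _count_guesses(lower, higher, to_guess):
    guesses = 0
    while True:
        guesses += 1
        number = int((lower + higher) / 2)  # same midpoint rule as get_avg above
        if number > to_guess:
            higher = number
        elif number < to_guess:
            lower = number
        else:
            return guesses

if __name__ == "__main__":
    # every target strictly inside (1, 1000) is found within ~log2(999) ≈ 10 guesses
    assert max(_count_guesses(1, 1000, t) for t in range(2, 1000)) <= 11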
| 73 | 1 |
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
print('\nThe shortest path matrix using Floyd Warshall algorithm\n')
for i in range(_UpperCAmelCase):
for j in range(_UpperCAmelCase):
if dist[i][j] != float('inf'):
print(int(dist[i][j]) , end='\t')
else:
print('INF' , end='\t')
print()
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = [[float('inf') for _ in range(_UpperCAmelCase)] for _ in range(_UpperCAmelCase)]
for i in range(_UpperCAmelCase):
for j in range(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(_UpperCAmelCase):
# looping through rows of graph array
for i in range(_UpperCAmelCase):
# looping through columns of graph array
for j in range(_UpperCAmelCase):
if (
dist[i][k] != float('inf')
and dist[k][j] != float('inf')
and dist[i][k] + dist[k][j] < dist[i][j]
):
SCREAMING_SNAKE_CASE = dist[i][k] + dist[k][j]
_print_dist(_UpperCAmelCase , _UpperCAmelCase)
return dist, v
if __name__ == "__main__":
a_ : Dict = int(input('Enter number of vertices: '))
a_ : str = int(input('Enter number of edges: '))
a_ : Tuple = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
a_ : Dict = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
a_ : str = int(input('Enter source:'))
a_ : Any = int(input('Enter destination:'))
a_ : List[Any] = float(input('Enter weight:'))
a_ : str = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
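# Aside (a minimal sketch, not part of the original script): the transcript above, replayed
# without the interactive input() calls. The entered indices are used as-is, which is why
# vertex 0 stays disconnected in the expected output. `_floyd_warshall_demo` is a hypothetical
# name introduced only for this illustration.
def _floyd_warshall_demo():
    INF = float('inf')
    v = 3
    dist = [[INF] * v for _ in range(v)]
    for i in range(v):
        dist[i][i] = 0.0
    dist[1][2] = 2.0  # "Edge 1": source 1 -> destination 2, weight 2
    dist[2][1] = 1.0  # "Edge 2": source 2 -> destination 1, weight 1
    for k in range(v):  # same relaxation order as floyd_warshall above
        for i in range(v):
            for j in range(v):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
    # matches the expected output block: 0 INF INF / INF 0 2 / INF 1 0
    assert dist[0][1] == INF and dist[1][2] == 2.0 and dist[2][1] == 1.0
    return dist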
| 73 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class _snake_case :
def __init__( self , a , a=13 , a=7 , a=True , a=True , a=False , a=True , a=99 , a=32 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length])
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices)
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , use_stable_embedding=a , )
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a) -> Any:
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a)
SCREAMING_SNAKE_CASE = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> str:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , )
SCREAMING_SNAKE_CASE = model(a , attention_mask=a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> int:
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> str:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=a)
model.to(a)
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size)
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1)
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1)
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1]).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3))
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( A__ , A__ , A__ , unittest.TestCase ):
_lowercase : List[Any] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
_lowercase : str = (OpenLlamaForCausalLM,) if is_torch_available() else ()
_lowercase : List[str] = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : List[str] = False
_lowercase : Optional[int] = False
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self)
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=a , hidden_size=37)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a)
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'single_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a)
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a)
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
pass
@parameterized.expand([('linear',), ('dynamic',)])
def SCREAMING_SNAKE_CASE__ ( self , a) -> Dict:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = ids_tensor([1, 10] , config.vocab_size)
SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(42) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = OpenLlamaModel(a)
original_model.to(a)
original_model.eval()
SCREAMING_SNAKE_CASE = original_model(a).last_hidden_state
SCREAMING_SNAKE_CASE = original_model(a).last_hidden_state
set_seed(42) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE = OpenLlamaModel(a)
scaled_model.to(a)
scaled_model.eval()
SCREAMING_SNAKE_CASE = scaled_model(a).last_hidden_state
SCREAMING_SNAKE_CASE = scaled_model(a).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1E-5))
else:
self.assertFalse(torch.allclose(a , a , atol=1E-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1E-5))
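# Aside (a minimal sketch, not part of the original test file): what "linear" RoPE scaling in
# the parameterized test above does to the rotary angles. The inv_freq expression is the
# standard RoPE formula (the same one that appears in this document's LLaMA conversion code);
# linear scaling with factor f stretches positions t -> t / f, squeezing a context f times
# longer back into the angle range seen during training. `_linear_rope_angles` is a
# hypothetical name introduced only for this illustration.
def _linear_rope_angles(positions, dim, base=10000.0, factor=10.0):
    import numpy as np

    inv_freq = 1.0 / (base ** (np.arange(0, dim, 2) / dim))
    return np.outer(np.asarray(positions) / factor, inv_freq)  # angle matrix: (len(positions), dim // 2)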
| 73 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a_ : List[str] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class _snake_case ( A__ , unittest.TestCase ):
_lowercase : Tuple = XLNetTokenizer
_lowercase : List[Any] = XLNetTokenizerFast
_lowercase : int = True
_lowercase : Any = True
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE = XLNetTokenizer(a , keep_accents=a)
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = '<s>'
SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a) , a)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a) , a)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<unk>')
self.assertEqual(vocab_keys[1] , '<s>')
self.assertEqual(vocab_keys[-1] , '<eod>')
self.assertEqual(len(a) , 1006)
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = XLNetTokenizer(a , keep_accents=a)
SCREAMING_SNAKE_CASE = tokenizer.tokenize('This is a test')
self.assertListEqual(a , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a) , [285, 46, 10, 170, 382])
SCREAMING_SNAKE_CASE = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(a)
self.assertListEqual(a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(a)
self.assertListEqual(
a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = XLNetTokenizer(a , do_lower_case=a)
SCREAMING_SNAKE_CASE = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
a , [
SPIECE_UNDERLINE + '',
'i',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['▁he', 'll', 'o'])
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = XLNetTokenizer(a , do_lower_case=a)
SCREAMING_SNAKE_CASE = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = XLNetTokenizer.from_pretrained('xlnet-base-cased')
SCREAMING_SNAKE_CASE = tokenizer.encode('sequence builders' , add_special_tokens=a)
SCREAMING_SNAKE_CASE = tokenizer.encode('multi-sequence build' , add_special_tokens=a)
SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(a)
SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(a , a)
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
# fmt: off
SCREAMING_SNAKE_CASE = {'input_ids': [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='xlnet-base-cased' , revision='c841166438c31ec7ca9a106dee7bb312b73ae511' , )
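# Aside (a minimal sketch, not part of the original test file): the '▁' prefix asserted all over
# the expected token lists above is SPIECE_UNDERLINE (U+2581), SentencePiece's marker for a
# preceding space. That makes detokenization a one-liner, which is why the token lists can be
# read straight back as the input sentences. `_spiece_detokenize` is a hypothetical helper.
def _spiece_detokenize(tokens):
    return ''.join(tokens).replace('\u2581', ' ').strip()

if __name__ == "__main__":
    assert _spiece_detokenize(['\u2581This', '\u2581is', '\u2581a', '\u2581t', 'est']) == 'This is a test'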
| 73 |
from __future__ import annotations
a_ : str = []
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
for i in range(len(_UpperCAmelCase)):
if board[row][i] == 1:
return False
for i in range(len(_UpperCAmelCase)):
if board[i][column] == 1:
return False
for i, j in zip(range(_UpperCAmelCase , -1 , -1) , range(_UpperCAmelCase , -1 , -1)):
if board[i][j] == 1:
return False
for i, j in zip(range(_UpperCAmelCase , -1 , -1) , range(_UpperCAmelCase , len(_UpperCAmelCase))):
if board[i][j] == 1:
return False
return True
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
if row >= len(_UpperCAmelCase):
solution.append(_UpperCAmelCase)
printboard(_UpperCAmelCase)
print()
return True
for i in range(len(_UpperCAmelCase)):
if is_safe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = 1
solve(_UpperCAmelCase , row + 1)
SCREAMING_SNAKE_CASE = 0
return False
def lowerCamelCase__ (_UpperCAmelCase):
for i in range(len(_UpperCAmelCase)):
for j in range(len(_UpperCAmelCase)):
if board[i][j] == 1:
print('Q' , end=' ')
else:
print('.' , end=' ')
print()
# n=int(input("The no. of queens"))
a_ : Tuple = 8
a_ : int = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions is:', len(solution))
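# Aside (a minimal sketch, not part of the original script): a compact way to sanity-check the
# backtracking search above against the known n-queens counts (1, 0, 0, 2, 10, 4, 40, 92 for
# n = 1..8; the n = 8 run above should therefore report 92 solutions). `_count_n_queens` is a
# hypothetical helper using bitmasks for the attacked columns and diagonals.
def _count_n_queens(n, row=0, cols=0, diag1=0, diag2=0):
    if row == n:
        return 1
    total = 0
    for col in range(n):
        # diag1 indexes the "/" diagonals by row + col; diag2 the "\" diagonals by row - col + n
        attacked = ((cols >> col) & 1) or ((diag1 >> (row + col)) & 1) or ((diag2 >> (row - col + n)) & 1)
        if not attacked:
            total += _count_n_queens(
                n,
                row + 1,
                cols | (1 << col),
                diag1 | (1 << (row + col)),
                diag2 | (1 << (row - col + n)),
            )
    return total

if __name__ == "__main__":
    assert [_count_n_queens(k) for k in range(1, 7)] == [1, 0, 0, 2, 10, 4]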
| 73 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ : Optional[int] = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
a_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure)
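# Aside (a minimal sketch, not part of the original __init__): in the unobfuscated library the
# _LazyModule instance built above is installed into sys.modules in place of the package, so
# importing the package does not pull in torch or vision until a symbol that needs them is
# actually accessed. A stripped-down version of the idea, with hypothetical names:
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # maps each attribute to the submodule that actually defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ only fires once per attribute
        return value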
| 73 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _snake_case ( A__ , A__ , unittest.TestCase ):
_lowercase : List[Any] = StableDiffusionDiffEditPipeline
_lowercase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
_lowercase : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
_lowercase : List[str] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowercase : List[str] = frozenset([] )
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , )
SCREAMING_SNAKE_CASE = DDIMInverseScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_zero=a , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
SCREAMING_SNAKE_CASE = CLIPTextModel(a)
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> List[Any]:
SCREAMING_SNAKE_CASE = floats_tensor((1, 16, 16) , rng=random.Random(a)).to(a)
SCREAMING_SNAKE_CASE = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a)).to(a)
if str(a).startswith('mps'):
SCREAMING_SNAKE_CASE = torch.manual_seed(a)
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
SCREAMING_SNAKE_CASE = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> List[Any]:
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(a)).to(a)
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1)[0]
SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(a)).convert('RGB')
if str(a).startswith('mps'):
SCREAMING_SNAKE_CASE = torch.manual_seed(a)
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
SCREAMING_SNAKE_CASE = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> Optional[int]:
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(a)).to(a)
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1)[0]
SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(a)).convert('RGB')
if str(a).startswith('mps'):
SCREAMING_SNAKE_CASE = torch.manual_seed(a)
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
SCREAMING_SNAKE_CASE = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
if not hasattr(self.pipeline_class , '_optional_components'):
return
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(a , a , a)
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a)
SCREAMING_SNAKE_CASE = pipe(**a)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a)
SCREAMING_SNAKE_CASE = self.pipeline_class.from_pretrained(a)
pipe_loaded.to(a)
pipe_loaded.set_progress_bar_config(disable=a)
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(a , a) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a)
SCREAMING_SNAKE_CASE = pipe_loaded(**a)[0]
SCREAMING_SNAKE_CASE = np.abs(output - output_loaded).max()
self.assertLess(a , 1E-4)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = 'cpu'
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = self.get_dummy_mask_inputs(a)
SCREAMING_SNAKE_CASE = pipe.generate_mask(**a)
SCREAMING_SNAKE_CASE = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16))
SCREAMING_SNAKE_CASE = np.array([0] * 9)
SCREAMING_SNAKE_CASE = np.abs(mask_slice.flatten() - expected_slice).max()
self.assertLessEqual(a , 1E-3)
self.assertEqual(mask[0, -3, -4] , 0)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = 'cpu'
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(a)
SCREAMING_SNAKE_CASE = pipe.invert(**a).images
SCREAMING_SNAKE_CASE = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
SCREAMING_SNAKE_CASE = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(a , 1E-3)
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=5E-3)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = 'cpu'
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler(**a)
SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler(**a)
SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(a)
SCREAMING_SNAKE_CASE = pipe.invert(**a).images
SCREAMING_SNAKE_CASE = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
SCREAMING_SNAKE_CASE = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(a , 1E-3)
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls) -> List[Any]:
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png')
SCREAMING_SNAKE_CASE = raw_image.convert('RGB').resize((768, 768))
SCREAMING_SNAKE_CASE = raw_image
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = torch.manual_seed(0)
SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=a , torch_dtype=torch.floataa)
SCREAMING_SNAKE_CASE = DDIMScheduler.from_config(pipe.scheduler.config)
SCREAMING_SNAKE_CASE = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = 'a bowl of fruit'
SCREAMING_SNAKE_CASE = 'a bowl of pears'
SCREAMING_SNAKE_CASE = pipe.generate_mask(
image=self.raw_image , source_prompt=a , target_prompt=a , generator=a , )
SCREAMING_SNAKE_CASE = pipe.invert(
prompt=a , image=self.raw_image , inpaint_strength=0.7 , generator=a).latents
SCREAMING_SNAKE_CASE = pipe(
prompt=a , mask_image=a , image_latents=a , generator=a , negative_prompt=a , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
SCREAMING_SNAKE_CASE = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png').resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = torch.manual_seed(0)
SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=a , torch_dtype=torch.floataa)
SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = 'a bowl of fruit'
SCREAMING_SNAKE_CASE = 'a bowl of pears'
SCREAMING_SNAKE_CASE = pipe.generate_mask(
image=self.raw_image , source_prompt=a , target_prompt=a , generator=a , )
SCREAMING_SNAKE_CASE = pipe.invert(
prompt=a , image=self.raw_image , inpaint_strength=0.7 , generator=a , num_inference_steps=25 , ).latents
SCREAMING_SNAKE_CASE = pipe(
prompt=a , mask_image=a , image_latents=a , generator=a , negative_prompt=a , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
SCREAMING_SNAKE_CASE = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png').resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
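# Aside (a minimal sketch, not part of the original test file): both slow tests above run the
# same three-step DiffEdit recipe; this is it without the assertion scaffolding. The method
# names and keyword arguments are the ones used in the tests; `_diffedit_flow` itself is a
# hypothetical helper introduced only for illustration.
def _diffedit_flow(pipe, image, source_prompt, target_prompt, generator):
    # 1) localize the edit region from the difference between the two prompts
    mask_image = pipe.generate_mask(
        image=image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator)
    # 2) invert the source image into latents that reconstruct it under the source prompt
    image_latents = pipe.invert(
        prompt=source_prompt, image=image, inpaint_strength=0.7, generator=generator).latents
    # 3) denoise toward the target prompt, editing only inside the mask
    return pipe(
        prompt=target_prompt, mask_image=mask_image, image_latents=image_latents,
        generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7,
        output_type='numpy').images[0]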
| 73 | 1 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
a_ : List[Any] = None
a_ : str = {
'7B': 1_10_08,
'13B': 1_38_24,
'30B': 1_79_20,
'65B': 2_20_16,
'70B': 2_86_72,
}
a_ : str = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def lowerCamelCase__ (n , ffn_dim_multiplier=1 , multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
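# Aside (a minimal check, not part of the original script): the helper above rounds
# int(8 * n / 3) up to the next multiple of `multiple_of`. For the 7B model (n = 4096):
# int(8 * 4096 / 3) = 10922, and the next multiple of 256 is 11008, which matches the
# '7B' entry in the intermediate-size table above.
assert lowerCamelCase__(4096) == 1_10_08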
def lowerCamelCase__ (_UpperCAmelCase):
with open(_UpperCAmelCase , 'r') as f:
return json.load(_UpperCAmelCase)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
with open(_UpperCAmelCase , 'w') as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=True):
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = os.path.join(_UpperCAmelCase , 'tmp')
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = read_json(os.path.join(_UpperCAmelCase , 'params.json'))
SCREAMING_SNAKE_CASE = NUM_SHARDS[model_size]
SCREAMING_SNAKE_CASE = params['n_layers']
SCREAMING_SNAKE_CASE = params['n_heads']
SCREAMING_SNAKE_CASE = n_heads // num_shards
SCREAMING_SNAKE_CASE = params['dim']
SCREAMING_SNAKE_CASE = dim // n_heads
SCREAMING_SNAKE_CASE = 1_00_00.0
SCREAMING_SNAKE_CASE = 1.0 / (base ** (torch.arange(0 , _UpperCAmelCase , 2).float() / dims_per_head))
if "n_kv_heads" in params:
SCREAMING_SNAKE_CASE = params['n_kv_heads'] # for GQA / MQA
SCREAMING_SNAKE_CASE = n_heads_per_shard // num_key_value_heads
SCREAMING_SNAKE_CASE = dim // num_key_value_heads
else: # compatibility with other checkpoints
SCREAMING_SNAKE_CASE = n_heads
SCREAMING_SNAKE_CASE = n_heads_per_shard
SCREAMING_SNAKE_CASE = dim
# permute for sliced rotary
def permute(_UpperCAmelCase , _UpperCAmelCase=n_heads , _UpperCAmelCase=dim , _UpperCAmelCase=dim):
return w.view(_UpperCAmelCase , dima // n_heads // 2 , 2 , _UpperCAmelCase).transpose(1 , 2).reshape(_UpperCAmelCase , _UpperCAmelCase)
print(F'''Fetching all parameters from the checkpoint at {input_base_path}.''')
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
SCREAMING_SNAKE_CASE = torch.load(os.path.join(_UpperCAmelCase , 'consolidated.00.pth') , map_location='cpu')
else:
# Sharded
SCREAMING_SNAKE_CASE = [
torch.load(os.path.join(_UpperCAmelCase , F'''consolidated.{i:02d}.pth''') , map_location='cpu')
for i in range(_UpperCAmelCase)
]
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = {'weight_map': {}}
for layer_i in range(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = F'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
SCREAMING_SNAKE_CASE = {
F'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wq.weight''']),
F'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wk.weight''']),
F'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[F'''layers.{layer_i}.attention.wv.weight'''],
F'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[F'''layers.{layer_i}.attention.wo.weight'''],
F'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w1.weight'''],
F'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w2.weight'''],
F'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w3.weight'''],
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[F'''layers.{layer_i}.attention_norm.weight'''],
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[F'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
SCREAMING_SNAKE_CASE = {
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.attention_norm.weight'''
].clone(),
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
SCREAMING_SNAKE_CASE = permute(
torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wq.weight'''].view(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
for i in range(_UpperCAmelCase)
] , dim=0 , ).reshape(_UpperCAmelCase , _UpperCAmelCase))
SCREAMING_SNAKE_CASE = permute(
torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wk.weight'''].view(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
for i in range(_UpperCAmelCase)
] , dim=0 , ).reshape(_UpperCAmelCase , _UpperCAmelCase) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
SCREAMING_SNAKE_CASE = torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wv.weight'''].view(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
for i in range(_UpperCAmelCase)
] , dim=0 , ).reshape(_UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = torch.cat(
[loaded[i][F'''layers.{layer_i}.attention.wo.weight'''] for i in range(_UpperCAmelCase)] , dim=1)
SCREAMING_SNAKE_CASE = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(_UpperCAmelCase)] , dim=0)
SCREAMING_SNAKE_CASE = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(_UpperCAmelCase)] , dim=1)
SCREAMING_SNAKE_CASE = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(_UpperCAmelCase)] , dim=0)
SCREAMING_SNAKE_CASE = inv_freq
for k, v in state_dict.items():
SCREAMING_SNAKE_CASE = filename
param_count += v.numel()
torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase))
SCREAMING_SNAKE_CASE = F'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
SCREAMING_SNAKE_CASE = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
SCREAMING_SNAKE_CASE = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(_UpperCAmelCase)] , dim=1),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_UpperCAmelCase)] , dim=0),
}
for k, v in state_dict.items():
SCREAMING_SNAKE_CASE = filename
param_count += v.numel()
torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase))
# Write configs
SCREAMING_SNAKE_CASE = {'total_size': param_count * 2}
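# param_count * 2: the checkpoint tensors are 16-bit, i.e. two bytes per parameter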
write_json(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'pytorch_model.bin.index.json'))
SCREAMING_SNAKE_CASE = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
SCREAMING_SNAKE_CASE = params['multiple_of'] if 'multiple_of' in params else 256
SCREAMING_SNAKE_CASE = LlamaConfig(
hidden_size=_UpperCAmelCase , intermediate_size=compute_intermediate_size(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_UpperCAmelCase , )
config.save_pretrained(_UpperCAmelCase)
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.')
SCREAMING_SNAKE_CASE = LlamaForCausalLM.from_pretrained(_UpperCAmelCase , torch_dtype=torch.float16 , low_cpu_mem_usage=_UpperCAmelCase)
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.')
model.save_pretrained(_UpperCAmelCase , safe_serialization=_UpperCAmelCase)
shutil.rmtree(_UpperCAmelCase)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.')
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input_dir', help='Location of LLaMA weights, which contains tokenizer.model and model folders')
    parser.add_argument(
        '--model_size', choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'])
    parser.add_argument(
        '--output_dir', help='Location to write HF model and tokenizer')
    parser.add_argument('--safe_serialization', type=bool, help='Whether or not to save using `safetensors`.')
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, 'tokenizer.model')
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
| 73 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : List[str] = logging.get_logger(__name__)
a_ : Any = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _snake_case ( A__ ):
_lowercase : Optional[int] = '''unispeech'''
def __init__( self , a=32 , a=768 , a=12 , a=12 , a=3072 , a="gelu" , a=0.1 , a=0.1 , a=0.1 , a=0.0 , a=0.0 , a=0.1 , a=0.1 , a=0.02 , a=1E-5 , a="group" , a="gelu" , a=(512, 512, 512, 512, 512, 512, 512) , a=(5, 2, 2, 2, 2, 2, 2) , a=(10, 3, 3, 3, 3, 2, 2) , a=False , a=128 , a=16 , a=False , a=True , a=0.05 , a=10 , a=2 , a=0.0 , a=10 , a=0 , a=320 , a=2 , a=0.1 , a=100 , a=256 , a=256 , a=0.1 , a="mean" , a=False , a=False , a=256 , a=80 , a=0 , a=1 , a=2 , a=0.5 , **a , ) -> Optional[int]:
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a)
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = feat_extract_norm
SCREAMING_SNAKE_CASE = feat_extract_activation
SCREAMING_SNAKE_CASE = list(a)
SCREAMING_SNAKE_CASE = list(a)
SCREAMING_SNAKE_CASE = list(a)
SCREAMING_SNAKE_CASE = conv_bias
SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE = len(self.conv_dim)
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = activation_dropout
SCREAMING_SNAKE_CASE = feat_proj_dropout
SCREAMING_SNAKE_CASE = final_dropout
SCREAMING_SNAKE_CASE = layerdrop
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_ctc_classes
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = do_stable_layer_norm
SCREAMING_SNAKE_CASE = use_weighted_layer_sum
SCREAMING_SNAKE_CASE = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE = apply_spec_augment
SCREAMING_SNAKE_CASE = mask_time_prob
SCREAMING_SNAKE_CASE = mask_time_length
SCREAMING_SNAKE_CASE = mask_time_min_masks
SCREAMING_SNAKE_CASE = mask_feature_prob
SCREAMING_SNAKE_CASE = mask_feature_length
SCREAMING_SNAKE_CASE = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE = num_codevectors_per_group
SCREAMING_SNAKE_CASE = num_codevector_groups
SCREAMING_SNAKE_CASE = contrastive_logits_temperature
SCREAMING_SNAKE_CASE = feat_quantizer_dropout
SCREAMING_SNAKE_CASE = num_negatives
SCREAMING_SNAKE_CASE = codevector_dim
SCREAMING_SNAKE_CASE = proj_codevector_dim
SCREAMING_SNAKE_CASE = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE = ctc_loss_reduction
SCREAMING_SNAKE_CASE = ctc_zero_infinity
# pretraining loss
SCREAMING_SNAKE_CASE = replace_prob
@property
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
return functools.reduce(operator.mul , self.conv_stride , 1)
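# Sanity check for the property above: with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2) the product is 5 * 2**6 = 320, i.e. every output frame
# covers 320 input samples, which is 20 ms of audio at a 16 kHz sampling rate.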
| 73 | 1 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _snake_case ( A__ , unittest.TestCase ):
_lowercase : Tuple = CodeGenTokenizer
_lowercase : Tuple = CodeGenTokenizerFast
_lowercase : Optional[int] = True
_lowercase : List[str] = {'''add_prefix_space''': True}
_lowercase : Optional[int] = False
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
SCREAMING_SNAKE_CASE = dict(zip(a , range(len(a))))
SCREAMING_SNAKE_CASE = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
SCREAMING_SNAKE_CASE = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(a) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(a))
def SCREAMING_SNAKE_CASE__ ( self , **a) -> Any:
kwargs.update(self.special_tokens_map)
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **a)
def SCREAMING_SNAKE_CASE__ ( self , **a) -> Optional[Any]:
kwargs.update(self.special_tokens_map)
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **a)
def SCREAMING_SNAKE_CASE__ ( self , a) -> Tuple:
SCREAMING_SNAKE_CASE = 'lower newer'
SCREAMING_SNAKE_CASE = 'lower newer'
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
SCREAMING_SNAKE_CASE = 'lower newer'
SCREAMING_SNAKE_CASE = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
SCREAMING_SNAKE_CASE = tokenizer.tokenize(a , add_prefix_space=a)
self.assertListEqual(a , a)
SCREAMING_SNAKE_CASE = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a) , a)
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer(add_prefix_space=a)
SCREAMING_SNAKE_CASE = 'lower newer'
# Testing tokenization
SCREAMING_SNAKE_CASE = tokenizer.tokenize(a , add_prefix_space=a)
SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(a)
self.assertListEqual(a , a)
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE = tokenizer.encode(a , add_special_tokens=a , add_prefix_space=a)
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer(add_prefix_space=a)
SCREAMING_SNAKE_CASE = tokenizer.encode(a , add_prefix_space=a)
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(a)
self.assertListEqual(a , a)
# Testing the unknown token
SCREAMING_SNAKE_CASE = tokens + [rust_tokenizer.unk_token]
SCREAMING_SNAKE_CASE = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a) , a)
def SCREAMING_SNAKE_CASE__ ( self , *a , **a) -> str:
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def SCREAMING_SNAKE_CASE__ ( self , a=15) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(a , **a)
# Simple input
SCREAMING_SNAKE_CASE = 'This is a simple input'
SCREAMING_SNAKE_CASE = ['This is a simple input 1', 'This is a simple input 2']
SCREAMING_SNAKE_CASE = ('This is a simple input', 'This is a pair')
SCREAMING_SNAKE_CASE = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding='max_length')
# Simple input
self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding='max_length')
# Simple input
self.assertRaises(
a , tokenizer_r.batch_encode_plus , a , max_length=a , padding='max_length' , )
# Pair input
self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding='max_length')
# Pair input
self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding='max_length')
# Pair input
self.assertRaises(
a , tokenizer_r.batch_encode_plus , a , max_length=a , padding='max_length' , )
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>')
# Simple input
SCREAMING_SNAKE_CASE = 'This is a simple input'
SCREAMING_SNAKE_CASE = ['This is a simple input looooooooong', 'This is a simple input']
SCREAMING_SNAKE_CASE = ('This is a simple input', 'This is a pair')
SCREAMING_SNAKE_CASE = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
SCREAMING_SNAKE_CASE = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE = tokenizer(a , padding='max_length' , max_length=30 , return_tensors='np')
SCREAMING_SNAKE_CASE = tokenizer(a , padding=a , truncate=a , return_tensors='np')
SCREAMING_SNAKE_CASE = tokenizer(*a , padding='max_length' , max_length=60 , return_tensors='np')
SCREAMING_SNAKE_CASE = tokenizer(a , padding=a , truncate=a , return_tensors='np')
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30)
self.assertTrue(pad_token_id in out_s['input_ids'])
self.assertTrue(0 in out_s['attention_mask'])
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33)
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0])
self.assertFalse(0 in out_sa['attention_mask'][0])
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1])
self.assertTrue(0 in out_sa['attention_mask'][1])
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60)
self.assertTrue(pad_token_id in out_p['input_ids'])
self.assertTrue(0 in out_p['attention_mask'])
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52)
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0])
self.assertFalse(0 in out_pa['attention_mask'][0])
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1])
self.assertTrue(0 in out_pa['attention_mask'][1])
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = '$$$'
SCREAMING_SNAKE_CASE = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a , add_bos_token=a)
SCREAMING_SNAKE_CASE = 'This is a simple input'
SCREAMING_SNAKE_CASE = ['This is a simple input 1', 'This is a simple input 2']
SCREAMING_SNAKE_CASE = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE = tokenizer(a)
SCREAMING_SNAKE_CASE = tokenizer(a)
self.assertEqual(out_s.input_ids[0] , a)
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))
SCREAMING_SNAKE_CASE = tokenizer.decode(out_s.input_ids)
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(out_sa.input_ids)
self.assertEqual(decode_s.split()[0] , a)
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono')
SCREAMING_SNAKE_CASE = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
SCREAMING_SNAKE_CASE = '\nif len_a > len_b: result = a\nelse: result = b'
SCREAMING_SNAKE_CASE = tokenizer.encode(a)
SCREAMING_SNAKE_CASE = ['^#', re.escape('<|endoftext|>'), '^\'\'\'', '^"""', '\n\n\n']
SCREAMING_SNAKE_CASE = tokenizer.decode(a , truncate_before_pattern=a)
self.assertEqual(a , a)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
pass
| 73 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
a_ : Optional[Any] = re.compile(R'\b(a|an|the)\b', re.UNICODE)
a_ : List[str] = None
def parse_args():
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
    parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
    parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
    parser.add_argument(
        '--out-file', '-o', metavar='eval.json', help='Write accuracy metrics to file (default is stdout).')
    parser.add_argument(
        '--na-prob-file', '-n', metavar='na_prob.json', help='Model estimates of probability of no answer.')
    parser.add_argument(
        '--na-prob-thresh', '-t', type=float, default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).')
    parser.add_argument(
        '--out-image-dir', '-p', metavar='out_images', default=None, help='Save precision-recall curves to directory.')
    parser.add_argument('--verbose', '-v', action='store_true')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article['paragraphs']:
            for qa in p['qas']:
                qid_to_has_ans[qa['id']] = bool(qa['answers']['text'])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        return ARTICLES_REGEX.sub(' ', text)
    def white_space_fix(text):
        return ' '.join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return ''.join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
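# Worked example for the token-level F1 above: gold 'the cat sat' normalizes to
# ['cat', 'sat'] (the article is stripped), prediction 'cat sat down' to
# ['cat', 'sat', 'down']; 2 shared tokens give precision = 2/3, recall = 1,
# and F1 = 2 * (2/3) * 1 / (2/3 + 1) = 0.8.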
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article['paragraphs']:
            for qa in p['qas']:
                qid = qa['id']
                gold_answers = [t for t in qa['answers']['text'] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = ['']
                if qid not in preds:
                    print(f'Missing prediction for {qid}')
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores.values()) / total),
                ('f1', 100.0 * sum(f1_scores.values()) / total),
                ('total', total),
            ])
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ('total', total),
            ])
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f'{prefix}_{k}'] = new_eval[k]
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
plt.step(_UpperCAmelCase , _UpperCAmelCase , color='b' , alpha=0.2 , where='post')
plt.fill_between(_UpperCAmelCase , _UpperCAmelCase , step='post' , alpha=0.2 , color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.title(_UpperCAmelCase)
plt.savefig(_UpperCAmelCase)
plt.clf()
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None):
SCREAMING_SNAKE_CASE = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase: na_probs[k])
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1.0
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = [1.0]
SCREAMING_SNAKE_CASE = [0.0]
SCREAMING_SNAKE_CASE = 0.0
for i, qid in enumerate(_UpperCAmelCase):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
SCREAMING_SNAKE_CASE = true_pos / float(i + 1)
SCREAMING_SNAKE_CASE = true_pos / float(_UpperCAmelCase)
if i == len(_UpperCAmelCase) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(_UpperCAmelCase)
recalls.append(_UpperCAmelCase)
if out_image:
plot_pr_curve(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
return {"ap": 1_00.0 * avg_prec}
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
if out_image_dir and not os.path.exists(_UpperCAmelCase):
os.makedirs(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = sum(1 for v in qid_to_has_ans.values() if v)
if num_true_pos == 0:
return
SCREAMING_SNAKE_CASE = make_precision_recall_eval(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_exact.png') , title='Precision-Recall curve for Exact Match score' , )
SCREAMING_SNAKE_CASE = make_precision_recall_eval(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_f1.png') , title='Precision-Recall curve for F1 score' , )
SCREAMING_SNAKE_CASE = {k: float(_UpperCAmelCase) for k, v in qid_to_has_ans.items()}
SCREAMING_SNAKE_CASE = make_precision_recall_eval(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_oracle.png') , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_exact')
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_f1')
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_oracle')
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
if not qid_list:
return
SCREAMING_SNAKE_CASE = [na_probs[k] for k in qid_list]
SCREAMING_SNAKE_CASE = np.ones_like(_UpperCAmelCase) / float(len(_UpperCAmelCase))
plt.hist(_UpperCAmelCase , weights=_UpperCAmelCase , bins=20 , range=(0.0, 1.0))
plt.xlabel('Model probability of no-answer')
plt.ylabel('Proportion of dataset')
plt.title(F'''Histogram of no-answer probability: {name}''')
plt.savefig(os.path.join(_UpperCAmelCase , F'''na_prob_hist_{name}.png'''))
plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
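# Sweep intuition: qids are visited in order of increasing no-answer probability.
# cur_score tracks the score if every question seen so far were answered normally
# and all later ones predicted unanswerable; the running maximum of that score
# picks the optimal no-answer threshold.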
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval['best_exact'] = best_exact
    main_eval['best_exact_thresh'] = exact_thresh
    main_eval['best_f1'] = best_f1
    main_eval['best_f1_thresh'] = f1_thresh
def lowerCamelCase__ ():
with open(OPTS.data_file) as f:
SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = dataset_json['data']
with open(OPTS.pred_file) as f:
SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase)
if OPTS.na_prob_file:
with open(OPTS.na_prob_file) as f:
SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase)
else:
SCREAMING_SNAKE_CASE = {k: 0.0 for k in preds}
SCREAMING_SNAKE_CASE = make_qid_to_has_ans(_UpperCAmelCase) # maps qid to True/False
SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if v]
SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if not v]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_raw_scores(_UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = apply_no_ans_threshold(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.na_prob_thresh)
SCREAMING_SNAKE_CASE = apply_no_ans_threshold(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.na_prob_thresh)
SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase)
if has_ans_qids:
SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase , qid_list=_UpperCAmelCase)
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'HasAns')
if no_ans_qids:
SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase , qid_list=_UpperCAmelCase)
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'NoAns')
if OPTS.na_prob_file:
find_all_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir)
histogram_na_prob(_UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir , 'hasAns')
histogram_na_prob(_UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir , 'noAns')
if OPTS.out_file:
with open(OPTS.out_file , 'w') as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase)
else:
print(json.dumps(_UpperCAmelCase , indent=2))
if __name__ == "__main__":
a_ : Any = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
| 73 | 1 |
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph, root):
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
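# The list-based Prim above finds the next vertex with a linear min(q) scan each
# round, O(V^2) overall; the generator variant below keeps the frontier in a
# binary heap instead, re-heapifying whenever a key decreases.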
def prim_heap(graph, root):
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def lowerCamelCase__ ():
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
a_ : Dict = logging.get_logger(__name__)
class _snake_case ( A__ ):
def __init__( self , *a , **a) -> None:
warnings.warn(
'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use GLPNImageProcessor instead.' , a , )
super().__init__(*a , **a)
| 73 | 1 |
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
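# The DP recurrence above: matrix[a][b] = min over split points c of
# matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b],
# i.e. the cost of the two optimal sub-chains plus the scalar multiplications of
# the final (array[a-1] x array[c]) times (array[c] x array[b]) product.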
def print_optiomal_solution(optimal_solution, i, j):
    if i == j:
        print('A' + str(i), end=' ')
    else:
        print('(', end=' ')
        print_optiomal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optiomal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(')', end=' ')
def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print('No. of Operations required: ' + str(matrix[1][n - 1]))
    print_optiomal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
| 73 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _snake_case ( unittest.TestCase , A__ ):
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = load_tool('text-classification')
self.tool.setup()
SCREAMING_SNAKE_CASE = load_tool('text-classification' , remote=a)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = self.tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(a , 'positive')
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.remote_tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(a , 'positive')
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(a , 'positive')
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(a , 'positive')
| 73 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a_ : Union[str, Any] = logging.get_logger(__name__)
a_ : List[str] = {'vocab_file': 'sentencepiece.bpe.model'}
a_ : str = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
a_ : str = {
'moussaKam/mbarthez': 10_24,
'moussaKam/barthez': 10_24,
'moussaKam/barthez-orangesum-title': 10_24,
}
a_ : Tuple = '▁'
class _snake_case ( A__ ):
_lowercase : Dict = VOCAB_FILES_NAMES
_lowercase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Optional[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self , a , a="<s>" , a="</s>" , a="</s>" , a="<s>" , a="<unk>" , a="<pad>" , a="<mask>" , a = None , **a , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE = AddedToken(a , lstrip=a , rstrip=a) if isinstance(a , a) else mask_token
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
SCREAMING_SNAKE_CASE = vocab_file
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(a))
SCREAMING_SNAKE_CASE = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
SCREAMING_SNAKE_CASE = len(self.sp_model) - 1
SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
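# Example of the layout built above (BART/CamemBERT style): a single sequence
# becomes <s> A </s>, and a pair becomes <s> A </s></s> B </s>.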
def SCREAMING_SNAKE_CASE__ ( self , a , a = None , a = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a)
if token_ids_a is None:
return [1] + ([0] * len(a)) + [1]
return [1] + ([0] * len(a)) + [1, 1] + ([0] * len(a)) + [1]
def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> List[int]:
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(a): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def SCREAMING_SNAKE_CASE__ ( self , a) -> List[str]:
return self.sp_model.encode(a , out_type=a)
def SCREAMING_SNAKE_CASE__ ( self , a) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE = self.sp_model.PieceToId(a)
return spm_id if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE__ ( self , a) -> Union[str, Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(a)
def SCREAMING_SNAKE_CASE__ ( self , a) -> List[Any]:
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = ''
SCREAMING_SNAKE_CASE = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a) + token
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = []
else:
current_sub_tokens.append(a)
SCREAMING_SNAKE_CASE = False
out_string += self.sp_model.decode(a)
return out_string.strip()
def __getstate__( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self , a) -> int:
SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> Tuple[str]:
if not os.path.isdir(a):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
SCREAMING_SNAKE_CASE = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(a) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , a)
elif not os.path.isfile(self.vocab_file):
with open(a , 'wb') as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(a)
return (out_vocab_file,)
| 73 |
import sys
import turtle
def get_mid(p1, p2):
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2
def triangle(vertex1, vertex2, vertex3, depth):
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
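# Each call spawns three recursive children, so a drawing of depth d contains
# 3**d smallest triangles; depth 5 already means 243 leaf triangles.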
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
a_ : Any = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
a_ : str = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 73 | 1 |
from __future__ import annotations
def simple_interest(principal, daily_interest_rate, days_between_payments):
if days_between_payments <= 0:
raise ValueError('days_between_payments must be > 0')
if daily_interest_rate < 0:
raise ValueError('daily_interest_rate must be >= 0')
if principal <= 0:
raise ValueError('principal must be > 0')
return principal * daily_interest_rate * days_between_payments
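# Worked example (round numbers chosen for illustration): simple_interest(500,
# 0.01, 10) returns 500 * 0.01 * 10 = 50.0 of interest accrued.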
def compound_interest(principal, nominal_annual_interest_rate_percentage, number_of_compounding_periods, ):
if number_of_compounding_periods <= 0:
raise ValueError('number_of_compounding_periods must be > 0')
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('nominal_annual_interest_rate_percentage must be >= 0')
if principal <= 0:
raise ValueError('principal must be > 0')
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def apr_interest(principal, nominal_annual_percentage_rate, number_of_years, ):
if number_of_years <= 0:
raise ValueError('number_of_years must be > 0')
if nominal_annual_percentage_rate < 0:
raise ValueError('nominal_annual_percentage_rate must be >= 0')
if principal <= 0:
raise ValueError('principal must be > 0')
return compound_interest(
_UpperCAmelCase , nominal_annual_percentage_rate / 365 , number_of_years * 365)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
a_ : Any = 'true'
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=16):
set_seed(42)
SCREAMING_SNAKE_CASE = RegressionModel()
SCREAMING_SNAKE_CASE = deepcopy(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = RegressionDataset(length=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase)
model.to(accelerator.device)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase)
return model, ddp_model, dataloader
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=False):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased')
SCREAMING_SNAKE_CASE = load_dataset('glue' , 'mrpc' , split='validation')
def tokenize_function(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase)
return outputs
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE = dataset.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column('label' , 'labels')
def collate_fn(_UpperCAmelCase):
if use_longest:
return tokenizer.pad(_UpperCAmelCase , padding='longest' , return_tensors='pt')
return tokenizer.pad(_UpperCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt')
return DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=16)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = Accelerator(dispatch_batches=_UpperCAmelCase , split_batches=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = get_dataloader(_UpperCAmelCase , not dispatch_batches)
SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase)
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = []
for batch in dataloader:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = batch.values()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((logit, target))
logits_and_targets.append((logit, target))
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = [], []
for logit, targ in logits_and_targets:
logits.append(_UpperCAmelCase)
targs.append(_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torch.cat(_UpperCAmelCase), torch.cat(_UpperCAmelCase)
return logits, targs
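# gather_for_metrics (used above) differs from a plain gather: it drops the
# samples that were duplicated to pad the last uneven batch across processes,
# so metric counts match the true dataset length.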
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=16):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_basic_setup(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = generate_predictions(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
assert (
len(_UpperCAmelCase) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_UpperCAmelCase)}'''
def lowerCamelCase__ (_UpperCAmelCase = False , _UpperCAmelCase = False):
SCREAMING_SNAKE_CASE = evaluate.load('glue' , 'mrpc')
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_mrpc_setup(_UpperCAmelCase , _UpperCAmelCase)
# First do baseline
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['no']
model.to(_UpperCAmelCase)
model.eval()
for batch in dataloader:
batch.to(_UpperCAmelCase)
with torch.inference_mode():
SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1)
metric.add_batch(predictions=_UpperCAmelCase , references=batch['labels'])
SCREAMING_SNAKE_CASE = metric.compute()
# Then do distributed
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1)
SCREAMING_SNAKE_CASE = batch['labels']
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((preds, references))
metric.add_batch(predictions=_UpperCAmelCase , references=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key]), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**')
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''')
test_mrpc(_UpperCAmelCase , _UpperCAmelCase)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**')
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase)
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''')
test_torch_metrics(_UpperCAmelCase , 99)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**')
SCREAMING_SNAKE_CASE = Accelerator()
test_torch_metrics(_UpperCAmelCase , 512)
accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 73 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
a_ : List[str] = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class _snake_case ( A__ ):
_lowercase : str = '''luke'''
def __init__( self , a=5_0267 , a=50_0000 , a=768 , a=256 , a=12 , a=12 , a=3072 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=2 , a=0.02 , a=1E-12 , a=True , a=None , a=1 , a=0 , a=2 , **a , ) -> Dict:
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a)
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = entity_vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = entity_emb_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = use_entity_aware_attention
SCREAMING_SNAKE_CASE = classifier_dropout
| 73 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_gpt_sw3'] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
a_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 73 | 1 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude, angle, radian_mode=False):
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def in_static_equilibrium(forces, location, eps=10**-1):
    # summation of moments is zero
    moments = cross(location, forces)
    sum_moments = sum(moments)
    return abs(sum_moments) < eps
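# Physical reading of the check above: with forces and their application points
# stacked row-wise, numpy's 2-D cross product gives each force's moment about the
# origin (r x F); static equilibrium requires the net moment to vanish within eps.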
if __name__ == "__main__":
# Test to check if it works
a_ : int = array(
[
polar_force(718.4, 1_80 - 30),
polar_force(879.54, 45),
polar_force(1_00, -90),
]
)
a_ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
a_ : Dict = array(
[
polar_force(30 * 9.81, 15),
polar_force(2_15, 1_80 - 45),
polar_force(2_64, 90 - 30),
]
)
a_ : Any = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
a_ : int = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
a_ : Optional[Any] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 73 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
a_ : str = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
def lowerCamelCase__ (_UpperCAmelCase=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=A__ ) )
class _snake_case ( A__ ):
_lowercase : Optional[Any] = None
_lowercase : Optional[Any] = None
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Optional[Any]:
with TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE = dataset_module_factory(a , cache_dir=a)
SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , dataset=a)
SCREAMING_SNAKE_CASE = builder_cls(
cache_dir=a , config_name=a , hash=dataset_module.hash , )
SCREAMING_SNAKE_CASE = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=a).replace(os.sep , '/'),
config.DATASET_INFO_FILENAME,
])
SCREAMING_SNAKE_CASE = cached_path(a , cache_dir=a)
self.assertTrue(os.path.exists(a))
@pytest.mark.integration
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('test_hf_gcp') / 'test_wikipedia_simple'
SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path)
SCREAMING_SNAKE_CASE = builder_cls(
cache_dir=_UpperCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
SCREAMING_SNAKE_CASE = None
builder_instance.download_and_prepare()
SCREAMING_SNAKE_CASE = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , dataset=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = builder_cls(
cache_dir=_UpperCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
SCREAMING_SNAKE_CASE = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_UpperCAmelCase , _UpperCAmelCase)
assert "train" in ds
assert isinstance(ds['train'] , _UpperCAmelCase)
assert next(iter(ds['train']))
| 73 | 1 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _snake_case ( A__ ):
_lowercase : Dict = (DDPMParallelScheduler,)
def SCREAMING_SNAKE_CASE__ ( self , **a) -> List[Any]:
SCREAMING_SNAKE_CASE = {
'num_train_timesteps': 1000,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**a)
return config
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=a)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=a , beta_end=a)
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a)
def SCREAMING_SNAKE_CASE__ ( self) -> int:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=a)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
self.check_over_configs(thresholding=a)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=a , prediction_type=a , sample_max_value=a , )
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=a)
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
for t in [0, 500, 999]:
self.check_over_forward(time_step=a)
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**a)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_09_79)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**a)
SCREAMING_SNAKE_CASE = len(a)
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter
SCREAMING_SNAKE_CASE = self.dummy_sample_deter + 0.1
SCREAMING_SNAKE_CASE = self.dummy_sample_deter - 0.1
SCREAMING_SNAKE_CASE = samplea.shape[0]
SCREAMING_SNAKE_CASE = torch.stack([samplea, samplea, samplea] , dim=0)
SCREAMING_SNAKE_CASE = torch.arange(a)[0:3, None].repeat(1 , a)
SCREAMING_SNAKE_CASE = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
SCREAMING_SNAKE_CASE = scheduler.batch_step_no_noise(a , timesteps.flatten(0 , 1) , samples.flatten(0 , 1))
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(a))
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 11_53.18_33) < 1E-2
assert abs(result_mean.item() - 0.50_05) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**a)
SCREAMING_SNAKE_CASE = len(a)
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter
SCREAMING_SNAKE_CASE = torch.manual_seed(0)
for t in reversed(range(a)):
# 1. predict noise residual
SCREAMING_SNAKE_CASE = model(a , a)
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE = scheduler.step(a , a , a , generator=a).prev_sample
SCREAMING_SNAKE_CASE = pred_prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(a))
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 2_58.96_06) < 1E-2
assert abs(result_mean.item() - 0.33_72) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type='v_prediction')
SCREAMING_SNAKE_CASE = scheduler_class(**a)
SCREAMING_SNAKE_CASE = len(a)
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter
SCREAMING_SNAKE_CASE = torch.manual_seed(0)
for t in reversed(range(a)):
# 1. predict noise residual
SCREAMING_SNAKE_CASE = model(a , a)
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE = scheduler.step(a , a , a , generator=a).prev_sample
SCREAMING_SNAKE_CASE = pred_prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(a))
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 2_02.02_96) < 1E-2
assert abs(result_mean.item() - 0.26_31) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**a)
SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=a)
SCREAMING_SNAKE_CASE = scheduler.timesteps
for i, timestep in enumerate(a):
if i == len(a) - 1:
SCREAMING_SNAKE_CASE = -1
else:
SCREAMING_SNAKE_CASE = timesteps[i + 1]
SCREAMING_SNAKE_CASE = scheduler.previous_timestep(a)
SCREAMING_SNAKE_CASE = prev_t.item()
self.assertEqual(a , a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**a)
SCREAMING_SNAKE_CASE = [100, 87, 50, 51, 0]
with self.assertRaises(a , msg='`custom_timesteps` must be in descending order.'):
scheduler.set_timesteps(timesteps=a)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**a)
SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0]
SCREAMING_SNAKE_CASE = len(a)
with self.assertRaises(a , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.'):
scheduler.set_timesteps(num_inference_steps=a , timesteps=a)
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**a)
SCREAMING_SNAKE_CASE = [scheduler.config.num_train_timesteps]
with self.assertRaises(
a , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=a)
| 73 |
from __future__ import annotations
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(_UpperCAmelCase)
if n > 1:
factors.append(_UpperCAmelCase)
return factors
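# Example: for n = 315 the trial-division loop strips factors in
# nondecreasing order, returning [3, 3, 5, 7].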
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 1 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
a_ : str = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
def lowerCamelCase__ (_UpperCAmelCase=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=A__ ) )
class _snake_case ( A__ ):
_lowercase : Optional[Any] = None
_lowercase : Optional[Any] = None
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Optional[Any]:
with TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE = dataset_module_factory(a , cache_dir=a)
SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , dataset=a)
SCREAMING_SNAKE_CASE = builder_cls(
cache_dir=a , config_name=a , hash=dataset_module.hash , )
SCREAMING_SNAKE_CASE = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=a).replace(os.sep , '/'),
config.DATASET_INFO_FILENAME,
])
SCREAMING_SNAKE_CASE = cached_path(a , cache_dir=a)
self.assertTrue(os.path.exists(a))
@pytest.mark.integration
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('test_hf_gcp') / 'test_wikipedia_simple'
SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path)
SCREAMING_SNAKE_CASE = builder_cls(
cache_dir=_UpperCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
SCREAMING_SNAKE_CASE = None
builder_instance.download_and_prepare()
SCREAMING_SNAKE_CASE = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , dataset=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = builder_cls(
cache_dir=_UpperCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
SCREAMING_SNAKE_CASE = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_UpperCAmelCase , _UpperCAmelCase)
assert "train" in ds
assert isinstance(ds['train'] , _UpperCAmelCase)
assert next(iter(ds['train']))
| 73 |
import math
import os
import sys
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = ''
try:
with open(_UpperCAmelCase , 'rb') as binary_file:
SCREAMING_SNAKE_CASE = binary_file.read()
for dat in data:
SCREAMING_SNAKE_CASE = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible')
sys.exit()
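# The next helper grows the LZ-style lexicon: the matched key is replaced by
# its two one-bit extensions, and whenever the running index hits a power of
# two every existing code is left-padded with '0' to keep code widths aligned.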
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
lexicon.pop(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = last_match_id
if math.log2(_UpperCAmelCase).is_integer():
for curr_key in lexicon:
SCREAMING_SNAKE_CASE = '0' + lexicon[curr_key]
SCREAMING_SNAKE_CASE = bin(_UpperCAmelCase)[2:]
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = {'0': '0', '1': '1'}
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = '', ''
SCREAMING_SNAKE_CASE = len(_UpperCAmelCase)
for i in range(len(_UpperCAmelCase)):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
SCREAMING_SNAKE_CASE = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
index += 1
SCREAMING_SNAKE_CASE = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
SCREAMING_SNAKE_CASE = lexicon[curr_string]
result += last_match_id
return result
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = os.path.getsize(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = bin(_UpperCAmelCase)[2:]
SCREAMING_SNAKE_CASE = len(_UpperCAmelCase)
return "0" * (length_length - 1) + file_length_binary + compressed
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = 8
try:
with open(_UpperCAmelCase , 'wb') as opened_file:
SCREAMING_SNAKE_CASE = [
to_write[i : i + byte_length]
for i in range(0 , len(_UpperCAmelCase) , _UpperCAmelCase)
]
if len(result_byte_array[-1]) % byte_length == 0:
result_byte_array.append('10000000')
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1]) - 1
)
for elem in result_byte_array:
opened_file.write(int(_UpperCAmelCase , 2).to_bytes(1 , byteorder='big'))
except OSError:
print('File not accessible')
sys.exit()
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = read_file_binary(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = compress_data(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = add_file_length(_UpperCAmelCase , _UpperCAmelCase)
write_file_binary(_UpperCAmelCase , _UpperCAmelCase)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 73 | 1 |
# using dfs for finding eulerian path traversal
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None):
SCREAMING_SNAKE_CASE = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True, True
SCREAMING_SNAKE_CASE = dfs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
return path
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = -1
for i in range(_UpperCAmelCase):
if i not in graph.keys():
continue
if len(graph[i]) % 2 == 1:
odd_degree_nodes += 1
SCREAMING_SNAKE_CASE = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
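# Return codes: 1 -> Eulerian circuit (no odd-degree vertices),
# 2 -> Eulerian path (exactly two odd-degree vertices), 3 -> neither,
# plus the last odd-degree vertex found (or -1 when there is none).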
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = check_circuit_or_path(_UpperCAmelCase , _UpperCAmelCase)
if check == 3:
print('graph is not Eulerian')
print('no path')
return
SCREAMING_SNAKE_CASE = 1
if check == 2:
SCREAMING_SNAKE_CASE = odd_node
print('graph has a Euler path')
if check == 1:
print('graph has a Euler cycle')
SCREAMING_SNAKE_CASE = dfs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
print(_UpperCAmelCase)
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
SCREAMING_SNAKE_CASE = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
SCREAMING_SNAKE_CASE = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
SCREAMING_SNAKE_CASE = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
SCREAMING_SNAKE_CASE = {
1: [],
2: []
# all degree is zero
}
SCREAMING_SNAKE_CASE = 10
check_euler(_UpperCAmelCase , _UpperCAmelCase)
check_euler(_UpperCAmelCase , _UpperCAmelCase)
check_euler(_UpperCAmelCase , _UpperCAmelCase)
check_euler(_UpperCAmelCase , _UpperCAmelCase)
check_euler(_UpperCAmelCase , _UpperCAmelCase)
if __name__ == "__main__":
main()
| 73 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def lowerCamelCase__ (_UpperCAmelCase):
return 1.0 / (1.0 + np.exp(-_outputs))
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = np.max(_outputs , axis=-1 , keepdims=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = np.exp(_outputs - maxes)
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_UpperCAmelCase)
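# Subtracting the per-row max before exponentiating is the numerically stable
# softmax: it avoids overflow and leaves the result unchanged, since softmax
# is invariant to adding a constant to every logit.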
class _snake_case ( A__ ):
_lowercase : Tuple = '''sigmoid'''
_lowercase : List[str] = '''softmax'''
_lowercase : Tuple = '''none'''
@add_end_docstrings(
A__ , R'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class _snake_case ( A__ ):
_lowercase : Optional[Any] = False
_lowercase : Tuple = ClassificationFunction.NONE
def __init__( self , **a) -> Optional[Any]:
super().__init__(**a)
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)
def SCREAMING_SNAKE_CASE__ ( self , a=None , a=None , a="" , **a) -> Tuple:
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
SCREAMING_SNAKE_CASE = tokenizer_kwargs
SCREAMING_SNAKE_CASE = {}
if hasattr(self.model.config , 'return_all_scores') and return_all_scores is None:
SCREAMING_SNAKE_CASE = self.model.config.return_all_scores
if isinstance(a , a) or top_k is None:
SCREAMING_SNAKE_CASE = top_k
SCREAMING_SNAKE_CASE = False
elif return_all_scores is not None:
warnings.warn(
'`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'
' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , a , )
if return_all_scores:
SCREAMING_SNAKE_CASE = None
else:
SCREAMING_SNAKE_CASE = 1
if isinstance(a , a):
SCREAMING_SNAKE_CASE = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
SCREAMING_SNAKE_CASE = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *a , **a) -> Optional[int]:
SCREAMING_SNAKE_CASE = super().__call__(*a , **a)
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
SCREAMING_SNAKE_CASE = 'top_k' not in kwargs
if isinstance(args[0] , a) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def SCREAMING_SNAKE_CASE__ ( self , a , **a) -> Dict[str, GenericTensor]:
SCREAMING_SNAKE_CASE = self.framework
if isinstance(a , a):
return self.tokenizer(**a , return_tensors=a , **a)
elif isinstance(a , a) and len(a) == 1 and isinstance(inputs[0] , a) and len(inputs[0]) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=a , **a)
elif isinstance(a , a):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.')
return self.tokenizer(a , return_tensors=a , **a)
def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[Any]:
return self.model(**a)
def SCREAMING_SNAKE_CASE__ ( self , a , a=None , a=1 , a=True) -> Any:
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
SCREAMING_SNAKE_CASE = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
SCREAMING_SNAKE_CASE = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , 'function_to_apply') and function_to_apply is None:
SCREAMING_SNAKE_CASE = self.model.config.function_to_apply
else:
SCREAMING_SNAKE_CASE = ClassificationFunction.NONE
SCREAMING_SNAKE_CASE = model_outputs['logits'][0]
SCREAMING_SNAKE_CASE = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
SCREAMING_SNAKE_CASE = sigmoid(a)
elif function_to_apply == ClassificationFunction.SOFTMAX:
SCREAMING_SNAKE_CASE = softmax(a)
elif function_to_apply == ClassificationFunction.NONE:
SCREAMING_SNAKE_CASE = outputs
else:
raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''')
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
SCREAMING_SNAKE_CASE = [
{'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(a)
]
if not _legacy:
dict_scores.sort(key=lambda x: x['score'] , reverse=True)
if top_k is not None:
SCREAMING_SNAKE_CASE = dict_scores[:top_k]
return dict_scores
| 73 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCamelCase__ (_UpperCAmelCase):
# A local function to see if a dot lands in the circle.
def is_in_circle(_UpperCAmelCase , _UpperCAmelCase) -> bool:
SCREAMING_SNAKE_CASE = sqrt((x**2) + (y**2))
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
SCREAMING_SNAKE_CASE = mean(
int(is_in_circle(uniform(-1.0 , 1.0) , uniform(-1.0 , 1.0)))
for _ in range(_UpperCAmelCase))
# The ratio of the area for circle to square is pi/4.
SCREAMING_SNAKE_CASE = proportion * 4
print(F'''The estimated value of pi is {pi_estimate}''')
print(F'''The math module's value of pi is {pi}''')
print(F'''The total error is {abs(pi - pi_estimate)}''')
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 0.0 , _UpperCAmelCase = 1.0 , ):
return mean(
function_to_integrate(uniform(_UpperCAmelCase , _UpperCAmelCase)) for _ in range(_UpperCAmelCase)) * (max_value - min_value)
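# Plain Monte Carlo integration: the mean of f over uniform samples on
# [min_value, max_value], scaled by the interval width, converges to the
# integral of f over that interval as the sample count grows.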
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase = 0.0 , _UpperCAmelCase = 1.0):
def identity_function(_UpperCAmelCase) -> float:
return x
SCREAMING_SNAKE_CASE = area_under_curve_estimator(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = (max_value * max_value - min_value * min_value) / 2
print('******************')
print(F'''Estimating area under y=x where x varies from {min_value} to {max_value}''')
print(F'''Estimated value is {estimated_value}''')
print(F'''Expected value is {expected_value}''')
print(F'''Total error is {abs(estimated_value - expected_value)}''')
print('******************')
def lowerCamelCase__ (_UpperCAmelCase):
def function_to_integrate(_UpperCAmelCase) -> float:
return sqrt(4.0 - x * x)
SCREAMING_SNAKE_CASE = area_under_curve_estimator(
_UpperCAmelCase , _UpperCAmelCase , 0.0 , 2.0)
print('******************')
print('Estimating pi using area_under_curve_estimator')
print(F'''Estimated value is {estimated_value}''')
print(F'''Expected value is {pi}''')
print(F'''Total error is {abs(estimated_value - pi)}''')
print('******************')
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 |
import heapq as hq
import math
from collections.abc import Iterator
class _snake_case :
def __init__( self , a) -> Optional[Any]:
SCREAMING_SNAKE_CASE = str(id_)
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = {} # {vertex:distance}
def __lt__( self , a) -> Dict:
return self.key < other.key
def __repr__( self) -> Optional[Any]:
return self.id
def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[Any]:
self.neighbors.append(a)
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Tuple:
SCREAMING_SNAKE_CASE = weight
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1])
graph[b - 1].add_neighbor(graph[a - 1])
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , _UpperCAmelCase)
graph[b - 1].add_edge(graph[a - 1] , _UpperCAmelCase)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = []
for u in graph:
SCREAMING_SNAKE_CASE = math.inf
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = graph[:]
while q:
SCREAMING_SNAKE_CASE = min(_UpperCAmelCase)
q.remove(_UpperCAmelCase)
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
SCREAMING_SNAKE_CASE = u
SCREAMING_SNAKE_CASE = u.edges[v.id]
for i in range(1 , len(_UpperCAmelCase)):
a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
return a
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
for u in graph:
SCREAMING_SNAKE_CASE = math.inf
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = list(_UpperCAmelCase)
hq.heapify(_UpperCAmelCase)
while h:
SCREAMING_SNAKE_CASE = hq.heappop(_UpperCAmelCase)
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
SCREAMING_SNAKE_CASE = u
SCREAMING_SNAKE_CASE = u.edges[v.id]
hq.heapify(_UpperCAmelCase)
for i in range(1 , len(_UpperCAmelCase)):
yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
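# Of the two routines above, the first is the O(V^2) array-scan form of
# Prim's algorithm and the second keeps candidates in a binary heap,
# re-heapifying after each key decrease; both report MST edges as
# (vertex_id + 1, parent_id + 1) pairs.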
def lowerCamelCase__ ():
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Optional[int] = logging.get_logger(__name__)
a_ : int = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class _snake_case ( A__ ):
_lowercase : Dict = '''cvt'''
def __init__( self , a=3 , a=[7, 3, 3] , a=[4, 2, 2] , a=[2, 1, 1] , a=[64, 192, 384] , a=[1, 3, 6] , a=[1, 2, 10] , a=[4.0, 4.0, 4.0] , a=[0.0, 0.0, 0.0] , a=[0.0, 0.0, 0.0] , a=[0.0, 0.0, 0.1] , a=[True, True, True] , a=[False, False, True] , a=["dw_bn", "dw_bn", "dw_bn"] , a=[3, 3, 3] , a=[1, 1, 1] , a=[2, 2, 2] , a=[1, 1, 1] , a=[1, 1, 1] , a=0.02 , a=1E-12 , **a , ) -> List[Any]:
super().__init__(**a)
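# Assumed reading: each list below holds one value per CvT stage, e.g. the
# CvT-13 defaults use embedding dims [64, 192, 384] and depths [1, 2, 10].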
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_sizes
SCREAMING_SNAKE_CASE = patch_stride
SCREAMING_SNAKE_CASE = patch_padding
SCREAMING_SNAKE_CASE = embed_dim
SCREAMING_SNAKE_CASE = num_heads
SCREAMING_SNAKE_CASE = depth
SCREAMING_SNAKE_CASE = mlp_ratio
SCREAMING_SNAKE_CASE = attention_drop_rate
SCREAMING_SNAKE_CASE = drop_rate
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = qkv_bias
SCREAMING_SNAKE_CASE = cls_token
SCREAMING_SNAKE_CASE = qkv_projection_method
SCREAMING_SNAKE_CASE = kernel_qkv
SCREAMING_SNAKE_CASE = padding_kv
SCREAMING_SNAKE_CASE = stride_kv
SCREAMING_SNAKE_CASE = padding_q
SCREAMING_SNAKE_CASE = stride_q
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
| 73 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ : Optional[Any] = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
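# Standard lazy-module layout: public names are declared as strings in
# _import_structure, imported for type checkers below, and otherwise resolved
# on first attribute access via _LazyModule at the bottom of the file.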
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Union[str, Any] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mask2former import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
Mask2FormerForUniversalSegmentation,
Mask2FormerModel,
Mask2FormerPreTrainedModel,
)
else:
import sys
a_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 73 | 1 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
# Initialise PyTorch model
SCREAMING_SNAKE_CASE = RemBertConfig.from_json_file(_UpperCAmelCase)
print('Building PyTorch model from configuration: {}'.format(str(_UpperCAmelCase)))
SCREAMING_SNAKE_CASE = RemBertModel(_UpperCAmelCase)
# Load weights from tf checkpoint
load_tf_weights_in_rembert(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# Save pytorch-model
print('Save PyTorch model to {}'.format(_UpperCAmelCase))
torch.save(model.state_dict() , _UpperCAmelCase)
if __name__ == "__main__":
a_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ : str = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 73 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
a_ : Union[str, Any] = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _snake_case ( A__ ):
_lowercase : Optional[Any] = '''decision_transformer'''
_lowercase : str = ['''past_key_values''']
_lowercase : Union[str, Any] = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , a=17 , a=4 , a=128 , a=4096 , a=True , a=1 , a=1024 , a=3 , a=1 , a=None , a="relu" , a=0.1 , a=0.1 , a=0.1 , a=1E-5 , a=0.02 , a=True , a=True , a=5_0256 , a=5_0256 , a=False , a=False , **a , ) -> List[str]:
SCREAMING_SNAKE_CASE = state_dim
SCREAMING_SNAKE_CASE = act_dim
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = max_ep_len
SCREAMING_SNAKE_CASE = action_tanh
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = n_positions
SCREAMING_SNAKE_CASE = n_layer
SCREAMING_SNAKE_CASE = n_head
SCREAMING_SNAKE_CASE = n_inner
SCREAMING_SNAKE_CASE = activation_function
SCREAMING_SNAKE_CASE = resid_pdrop
SCREAMING_SNAKE_CASE = embd_pdrop
SCREAMING_SNAKE_CASE = attn_pdrop
SCREAMING_SNAKE_CASE = layer_norm_epsilon
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scale_attn_weights
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE = bos_token_id
SCREAMING_SNAKE_CASE = eos_token_id
super().__init__(bos_token_id=a , eos_token_id=a , **a)
| 73 | 1 |
def lowerCamelCase__ (_UpperCAmelCase):
if any(not isinstance(_UpperCAmelCase , _UpperCAmelCase) or x < 0 for x in sequence):
raise TypeError('Sequence must be list of non-negative integers')
for _ in range(len(_UpperCAmelCase)):
for i, (rod_upper, rod_lower) in enumerate(zip(_UpperCAmelCase , sequence[1:])):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
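# Bead ("gravity") sort: each pass lets surplus beads fall from a taller rod
# onto its right neighbor, so larger values drift toward the end; after
# len(sequence) passes the list is sorted ascending.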
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 73 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ : Optional[int] = 16
a_ : Any = 32
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase = 16):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('bert-base-cased')
SCREAMING_SNAKE_CASE = load_dataset('glue' , 'mrpc')
def tokenize_function(_UpperCAmelCase):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column('label' , 'labels')
def collate_fn(_UpperCAmelCase):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE = 8
else:
SCREAMING_SNAKE_CASE = None
return tokenizer.pad(
_UpperCAmelCase , padding='longest' , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors='pt' , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets['train'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase , drop_last=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets['validation'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase , drop_last=(accelerator.mixed_precision == 'fp8') , )
return train_dataloader, eval_dataloader
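# Note on the collate choices above: padding to a multiple of 8 (fp16) or 16
# (fp8) keeps tensor shapes friendly to Tensor Core kernels, while the fixed
# length of 128 on TPU avoids repeated recompilation of the XLA graph.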
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
# Initialize accelerator
SCREAMING_SNAKE_CASE = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE = config['lr']
SCREAMING_SNAKE_CASE = int(config['num_epochs'])
SCREAMING_SNAKE_CASE = int(config['seed'])
SCREAMING_SNAKE_CASE = int(config['batch_size'])
SCREAMING_SNAKE_CASE = evaluate.load('glue' , 'mrpc')
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE = batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE = MAX_GPU_BATCH_SIZE
set_seed(_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_UpperCAmelCase)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE = model.to(accelerator.device)
# Instantiate optimizer
SCREAMING_SNAKE_CASE = AdamW(params=model.parameters() , lr=_UpperCAmelCase)
# Instantiate scheduler
SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup(
optimizer=_UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_UpperCAmelCase) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# Now we train the model
for epoch in range(_UpperCAmelCase):
model.train()
for step, batch in enumerate(_UpperCAmelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
SCREAMING_SNAKE_CASE = outputs.loss
SCREAMING_SNAKE_CASE = loss / gradient_accumulation_steps
accelerator.backward(_UpperCAmelCase)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_UpperCAmelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch['labels']))
metric.add_batch(
predictions=_UpperCAmelCase , references=_UpperCAmelCase , )
SCREAMING_SNAKE_CASE = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _UpperCAmelCase)
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description='Simple example of training script.')
parser.add_argument(
'--mixed_precision' , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.')
SCREAMING_SNAKE_CASE = parser.parse_args()
SCREAMING_SNAKE_CASE = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(_UpperCAmelCase , _UpperCAmelCase)
if __name__ == "__main__":
main()
| 73 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
a_ : str = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
a_ : Optional[int] = {
'RUCAIBox/mvp': 10_24,
}
class _snake_case ( A__ ):
_lowercase : str = VOCAB_FILES_NAMES
_lowercase : Any = PRETRAINED_VOCAB_FILES_MAP
_lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Tuple = ['''input_ids''', '''attention_mask''']
_lowercase : Optional[int] = MvpTokenizer
def __init__( self , a=None , a=None , a=None , a="replace" , a="<s>" , a="</s>" , a="</s>" , a="<s>" , a="<unk>" , a="<pad>" , a="<mask>" , a=False , a=True , **a , ) -> int:
super().__init__(
a , a , tokenizer_file=a , errors=a , bos_token=a , eos_token=a , sep_token=a , cls_token=a , unk_token=a , pad_token=a , mask_token=a , add_prefix_space=a , trim_offsets=a , **a , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , a) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(a , pre_tok_state.pop('type'))
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**a)
SCREAMING_SNAKE_CASE = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE = 'post_processor'
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , a , a)
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state['sep'])
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state['cls'])
SCREAMING_SNAKE_CASE = False
if state.get('add_prefix_space' , a) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get('trim_offsets' , a) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(a , state.pop('type'))
SCREAMING_SNAKE_CASE = component_class(**a)
setattr(self.backend_tokenizer , a , a)
@property
def mask_token( self) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def mask_token( self , a) -> None:
SCREAMING_SNAKE_CASE = AddedToken(a , lstrip=a , rstrip=a) if isinstance(a , a) else value
SCREAMING_SNAKE_CASE = value
def SCREAMING_SNAKE_CASE__ ( self , *a , **a) -> BatchEncoding:
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , a)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*a , **a)
def SCREAMING_SNAKE_CASE__ ( self , *a , **a) -> BatchEncoding:
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , a)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._encode_plus(*a , **a)
def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> Tuple[str]:
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(a , name=a)
return tuple(a)
def SCREAMING_SNAKE_CASE__ ( self , a , a=None) -> Dict:
SCREAMING_SNAKE_CASE = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> List[int]:
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
| 73 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a_ : int = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
a_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 73 | 1 |
import json
import sys
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
with open(_UpperCAmelCase , encoding='utf-8') as f:
SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
for benchmark_name in sorted(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = results[benchmark_name]
SCREAMING_SNAKE_CASE = benchmark_name.split('/')[-1]
output_md.append(F'''### Benchmark: {benchmark_file_name}''')
SCREAMING_SNAKE_CASE = '| metric |'
SCREAMING_SNAKE_CASE = '|--------|'
SCREAMING_SNAKE_CASE = '| new / old (diff) |'
for metric_name in sorted(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = benchmark_res[metric_name]
SCREAMING_SNAKE_CASE = metric_vals['new']
SCREAMING_SNAKE_CASE = metric_vals.get('old' , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = metric_vals.get('diff' , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = F''' {new_val:f}''' if isinstance(_UpperCAmelCase , (int, float)) else 'None'
if old_val is not None:
val_str += F''' / {old_val:f}''' if isinstance(_UpperCAmelCase , (int, float)) else "None"
if dif_val is not None:
val_str += F''' ({dif_val:f})''' if isinstance(_UpperCAmelCase , (int, float)) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append('</details>')
with open(_UpperCAmelCase , 'w' , encoding='utf-8') as f:
f.writelines('\n'.join(_UpperCAmelCase))
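# A produced section looks like (hypothetical values):
# ### Benchmark: benchmark_map.json
# | metric | load_time |
# |--------|---|
# | new / old (diff) | 0.123000 / 0.115000 (0.008000) |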
if __name__ == "__main__":
a_ : Dict = sys.argv[1]
a_ : List[str] = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 73 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False):
if radian_mode:
return [magnitude * cos(_UpperCAmelCase), magnitude * sin(_UpperCAmelCase)]
return [magnitude * cos(radians(_UpperCAmelCase)), magnitude * sin(radians(_UpperCAmelCase))]
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 10**-1):
SCREAMING_SNAKE_CASE = cross(_UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = sum(_UpperCAmelCase)
return abs(_UpperCAmelCase) < eps
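# Note: this check balances moments only; cross(force, position) gives the
# z-component of each torque, and the torques must sum to (near) zero within
# eps for the system to count as being in static equilibrium.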
if __name__ == "__main__":
# Test to check if it works
a_ : int = array(
[
polar_force(718.4, 1_80 - 30),
polar_force(879.54, 45),
polar_force(1_00, -90),
]
)
a_ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
a_ : Dict = array(
[
polar_force(30 * 9.81, 15),
polar_force(2_15, 1_80 - 45),
polar_force(2_64, 90 - 30),
]
)
a_ : Any = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
a_ : int = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
a_ : Optional[Any] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 73 | 1 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
SCREAMING_SNAKE_CASE = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase).raw).convert('RGB')
SCREAMING_SNAKE_CASE = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11)),
])
SCREAMING_SNAKE_CASE = transform(_UpperCAmelCase).unsqueeze(0).to(_UpperCAmelCase)
return image
def lowerCamelCase__ (_UpperCAmelCase):
if "visual_encoder" in key:
SCREAMING_SNAKE_CASE = re.sub('visual_encoder*' , 'vision_model.encoder' , _UpperCAmelCase)
if "blocks" in key:
SCREAMING_SNAKE_CASE = re.sub(R'blocks' , 'layers' , _UpperCAmelCase)
if "attn" in key:
SCREAMING_SNAKE_CASE = re.sub(R'attn' , 'self_attn' , _UpperCAmelCase)
if "norm1" in key:
SCREAMING_SNAKE_CASE = re.sub(R'norm1' , 'layer_norm1' , _UpperCAmelCase)
if "norm2" in key:
SCREAMING_SNAKE_CASE = re.sub(R'norm2' , 'layer_norm2' , _UpperCAmelCase)
if "encoder.norm" in key:
SCREAMING_SNAKE_CASE = re.sub(R'encoder.norm' , 'post_layernorm' , _UpperCAmelCase)
if "encoder.patch_embed.proj" in key:
SCREAMING_SNAKE_CASE = re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , _UpperCAmelCase)
if "encoder.pos_embed" in key:
SCREAMING_SNAKE_CASE = re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , _UpperCAmelCase)
if "encoder.cls_token" in key:
SCREAMING_SNAKE_CASE = re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , _UpperCAmelCase)
if "self_attn" in key:
SCREAMING_SNAKE_CASE = re.sub(R'self_attn.proj' , 'self_attn.projection' , _UpperCAmelCase)
return key
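# rename_key maps BLIP's original ViT naming onto the HF vision tower, e.g.
# 'visual_encoder.blocks.0.attn.proj' -> 'vision_model.encoder.layers.0.self_attn.projection'.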
@torch.no_grad()
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=None):
if config_path is not None:
SCREAMING_SNAKE_CASE = BlipConfig.from_pretrained(_UpperCAmelCase)
else:
SCREAMING_SNAKE_CASE = BlipConfig(projection_dim=512 , text_config={} , vision_config={})
SCREAMING_SNAKE_CASE = BlipForConditionalGeneration(_UpperCAmelCase).eval()
SCREAMING_SNAKE_CASE = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
SCREAMING_SNAKE_CASE = blip_decoder(pretrained=_UpperCAmelCase , image_size=384 , vit='base')
SCREAMING_SNAKE_CASE = pt_model.eval()
SCREAMING_SNAKE_CASE = pt_model.state_dict()
for key in modified_state_dict.copy():
SCREAMING_SNAKE_CASE = modified_state_dict.pop(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = rename_key(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = value
hf_model.load_state_dict(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = 384
SCREAMING_SNAKE_CASE = load_demo_image(image_size=_UpperCAmelCase , device='cpu')
SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained('bert-base-uncased')
SCREAMING_SNAKE_CASE = tokenizer(['a picture of']).input_ids
SCREAMING_SNAKE_CASE = hf_model.generate(_UpperCAmelCase , _UpperCAmelCase)
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
SCREAMING_SNAKE_CASE = hf_model.generate(_UpperCAmelCase)
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(_UpperCAmelCase)
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
SCREAMING_SNAKE_CASE = (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
SCREAMING_SNAKE_CASE = blip_vqa(pretrained=_UpperCAmelCase , image_size=_UpperCAmelCase , vit='base')
vqa_model.eval()
SCREAMING_SNAKE_CASE = vqa_model.state_dict()
for key in modified_state_dict.copy():
SCREAMING_SNAKE_CASE = modified_state_dict.pop(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = rename_key(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = value
SCREAMING_SNAKE_CASE = BlipForQuestionAnswering(_UpperCAmelCase)
hf_vqa_model.load_state_dict(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = ['How many dogs are in this image?']
SCREAMING_SNAKE_CASE = tokenizer(_UpperCAmelCase , return_tensors='pt').input_ids
SCREAMING_SNAKE_CASE = hf_vqa_model.generate(_UpperCAmelCase , _UpperCAmelCase)
print(tokenizer.decode(answer[0]))
assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa')
SCREAMING_SNAKE_CASE = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
SCREAMING_SNAKE_CASE = blip_itm(pretrained=_UpperCAmelCase , image_size=_UpperCAmelCase , vit='base')
itm_model.eval()
SCREAMING_SNAKE_CASE = itm_model.state_dict()
for key in modified_state_dict.copy():
SCREAMING_SNAKE_CASE = modified_state_dict.pop(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = rename_key(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = value
SCREAMING_SNAKE_CASE = BlipForImageTextRetrieval(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = ['A picture of a woman with a dog sitting in a beach']
SCREAMING_SNAKE_CASE = tokenizer(
_UpperCAmelCase , return_tensors='pt' , padding='max_length' , truncation=_UpperCAmelCase , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(_UpperCAmelCase)
hf_itm_model.eval()
SCREAMING_SNAKE_CASE = hf_itm_model(_UpperCAmelCase , _UpperCAmelCase , use_itm_head=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = hf_itm_model(_UpperCAmelCase , _UpperCAmelCase , use_itm_head=_UpperCAmelCase)
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1)[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm')
if __name__ == "__main__":
a_ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
a_ : Union[str, Any] = parser.parse_args()
convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 73 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Optional[int] = logging.get_logger(__name__)
a_ : int = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class _snake_case ( A__ ):
_lowercase : Dict = '''cvt'''
def __init__( self , a=3 , a=[7, 3, 3] , a=[4, 2, 2] , a=[2, 1, 1] , a=[64, 192, 384] , a=[1, 3, 6] , a=[1, 2, 10] , a=[4.0, 4.0, 4.0] , a=[0.0, 0.0, 0.0] , a=[0.0, 0.0, 0.0] , a=[0.0, 0.0, 0.1] , a=[True, True, True] , a=[False, False, True] , a=["dw_bn", "dw_bn", "dw_bn"] , a=[3, 3, 3] , a=[1, 1, 1] , a=[2, 2, 2] , a=[1, 1, 1] , a=[1, 1, 1] , a=0.02 , a=1E-12 , **a , ) -> List[Any]:
super().__init__(**a)
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_sizes
SCREAMING_SNAKE_CASE = patch_stride
SCREAMING_SNAKE_CASE = patch_padding
SCREAMING_SNAKE_CASE = embed_dim
SCREAMING_SNAKE_CASE = num_heads
SCREAMING_SNAKE_CASE = depth
SCREAMING_SNAKE_CASE = mlp_ratio
SCREAMING_SNAKE_CASE = attention_drop_rate
SCREAMING_SNAKE_CASE = drop_rate
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = qkv_bias
SCREAMING_SNAKE_CASE = cls_token
SCREAMING_SNAKE_CASE = qkv_projection_method
SCREAMING_SNAKE_CASE = kernel_qkv
SCREAMING_SNAKE_CASE = padding_kv
SCREAMING_SNAKE_CASE = stride_kv
SCREAMING_SNAKE_CASE = padding_q
SCREAMING_SNAKE_CASE = stride_q
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
| 73 | 1 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(R'\b(a|an|the)\b', re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.')
parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.')
parser.add_argument(
'--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).')
parser.add_argument(
'--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.')
parser.add_argument(
    '--na-prob-thresh' , '-t' , type=float , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , )
parser.add_argument(
    '--out-image-dir' , '-p' , metavar='out_images' , default=None , help='Save precision-recall curves to directory.')
parser.add_argument('--verbose' , '-v' , action='store_true')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa['id']] = bool(qa['answers']['text'])
    return qid_to_has_ans
def normalize_answer(s):
    def remove_articles(text):
        return ARTICLES_REGEX.sub(' ' , text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
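# Worked example of the normalization above (safe to run; pure function):
# lowercase -> strip punctuation -> drop articles -> collapse whitespace.
assert normalize_answer('The  Eiffel Tower!') == 'eiffel tower'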
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold , a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold , a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
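# Worked example of the token-overlap F1 above: two of three tokens match,
# so precision = recall = 2/3 and F1 = 2 * (2/3 * 2/3) / (2/3 + 2/3) = 2/3.
assert abs(compute_f1('big cat sat' , 'big cat ran') - 2 / 3) < 1E-9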
def get_raw_scores(dataset , preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa['id']
                gold_answers = [t for t in qa['answers']['text'] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = ['']
                if qid not in preds:
                    print(F'''Missing prediction for {qid}''')
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a , a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a , a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores , na_probs , qid_to_has_ans , na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
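# Example: with na_prob_thresh = 0.5, a question whose no-answer probability is
# 0.8 is scored as if the model predicted '': 1.0 when the gold answer is also
# empty, 0.0 otherwise; questions at or below the threshold keep their score.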
def make_eval_dict(exact_scores , f1_scores , qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores.values()) / total),
                ('f1', 100.0 * sum(f1_scores.values()) / total),
                ('total', total),
            ])
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ('total', total),
            ])
def merge_eval(main_eval , new_eval , prefix):
    for k in new_eval:
        main_eval[F'''{prefix}_{k}'''] = new_eval[k]
def plot_pr_curve(precisions , recalls , out_image , title):
    plt.step(recalls , precisions , color='b' , alpha=0.2 , where='post')
    plt.fill_between(recalls , precisions , step='post' , alpha=0.2 , color='b')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores , na_probs , num_true_pos , qid_to_has_ans , out_image=None , title=None):
    qid_list = sorted(na_probs , key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions , recalls , out_image , title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval , exact_raw , f1_raw , na_probs , qid_to_has_ans , out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , 'pr_exact.png') , title='Precision-Recall curve for Exact Match score' , )
    pr_f1 = make_precision_recall_eval(
        f1_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , 'pr_f1.png') , title='Precision-Recall curve for F1 score' , )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , 'pr_oracle.png') , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
    merge_eval(main_eval , pr_exact , 'pr_exact')
    merge_eval(main_eval , pr_f1 , 'pr_f1')
    merge_eval(main_eval , pr_oracle , 'pr_oracle')
def histogram_na_prob(na_probs , qid_list , image_dir , name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x , weights=weights , bins=20 , range=(0.0, 1.0))
    plt.xlabel('Model probability of no-answer')
    plt.ylabel('Proportion of dataset')
    plt.title(F'''Histogram of no-answer probability: {name}''')
    plt.savefig(os.path.join(image_dir , F'''na_prob_hist_{name}.png'''))
    plt.clf()
def find_best_thresh(preds , scores , na_probs , qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs , key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
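# The sweep above starts from the all-blank baseline (every question answered
# with ''), then admits answers in order of increasing no-answer probability:
# a has-answer question adds its score, an unanswerable one answered with
# non-empty text costs 1. The best running total fixes the threshold.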
def find_all_best_thresh(main_eval , preds , exact_raw , f1_raw , na_probs , qid_to_has_ans):
    best_exact , exact_thresh = find_best_thresh(preds , exact_raw , na_probs , qid_to_has_ans)
    best_f1 , f1_thresh = find_best_thresh(preds , f1_raw , na_probs , qid_to_has_ans)
    main_eval['best_exact'] = best_exact
    main_eval['best_exact_thresh'] = exact_thresh
    main_eval['best_f1'] = best_f1
    main_eval['best_f1_thresh'] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json['data']
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw , f1_raw = get_raw_scores(dataset , preds)
    exact_thresh = apply_no_ans_threshold(exact_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh , f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh , f1_thresh , qid_list=has_ans_qids)
        merge_eval(out_eval , has_ans_eval , 'HasAns')
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh , f1_thresh , qid_list=no_ans_qids)
        merge_eval(out_eval , no_ans_eval , 'NoAns')
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval , preds , exact_raw , f1_raw , na_probs , qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval , exact_raw , f1_raw , na_probs , qid_to_has_ans , OPTS.out_image_dir)
        histogram_na_prob(na_probs , has_ans_qids , OPTS.out_image_dir , 'hasAns')
        histogram_na_prob(na_probs , no_ans_qids , OPTS.out_image_dir , 'noAns')
    if OPTS.out_file:
        with open(OPTS.out_file , 'w') as f:
            json.dump(out_eval , f)
    else:
        print(json.dumps(out_eval , indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
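# Example invocation (script name is a placeholder; flags as defined above):
#   python evaluate_v2.py data.json pred.json --na-prob-file na_prob.json -o eval.json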
| 73 |
def get_bound(min_val=10 , max_val=1000 , option=True):
    assert (
        isinstance(min_val , int)
        and isinstance(max_val , int)
        and isinstance(option , bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError('Invalid value for min_val or max_val (min_val must not exceed max_val)')
    return min_val if option else max_val
def get_avg(number_a , number_b):
    return int((number_a + number_b) / 2)
def guess_the_number(lower , higher , to_guess):
    assert (
        isinstance(lower , int) and isinstance(higher , int) and isinstance(to_guess , int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError('argument values for lower and higher must satisfy lower < higher')
    if not lower < to_guess < higher:
        raise ValueError(
            'guess value must be within the range of lower and higher values')
    def answer(number) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"
    print('started...')
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest , last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(F'''guess the number : {last_numbers[-1]}''')
    print(F'''details : {last_numbers!s}''')
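# Worked trace for guess_the_number(0 , 1000 , 355): midpoints go
# 500 -> 250 -> 375 -> 312 -> 343 -> 359 -> 351 -> 355, halving the search
# interval each step (binary search, O(log(higher - lower)) guesses).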
def main():
    lower = int(input('Enter lower value : ').strip())
    higher = int(input('Enter high value : ').strip())
    guess = int(input('Enter value to guess : ').strip())
    guess_the_number(lower , higher , guess)
if __name__ == "__main__":
main()
| 73 | 1 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _snake_case ( unittest.TestCase ):
_lowercase : Optional[int] = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_lowercase : Union[str, Any] = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> Union[str, Any]:
        audio_classifier = AudioClassificationPipeline(model=a , feature_extractor=a)
        # test with a raw waveform
        audioa = np.zeros((34000,))
        audio = np.zeros((14000,))
return audio_classifier, [audioa, audio]
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Dict:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = examples
SCREAMING_SNAKE_CASE = audio_classifier(a)
# by default a model is initialized with num_labels=2
self.assertEqual(
a , [
{'score': ANY(a), 'label': ANY(a)},
{'score': ANY(a), 'label': ANY(a)},
] , )
SCREAMING_SNAKE_CASE = audio_classifier(a , top_k=1)
self.assertEqual(
a , [
{'score': ANY(a), 'label': ANY(a)},
] , )
self.run_torchaudio(a)
@require_torchaudio
def SCREAMING_SNAKE_CASE__ ( self , a) -> Union[str, Any]:
import datasets
# test with a local file
SCREAMING_SNAKE_CASE = datasets.load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation')
SCREAMING_SNAKE_CASE = dataset[0]['audio']['array']
SCREAMING_SNAKE_CASE = audio_classifier(a)
self.assertEqual(
a , [
{'score': ANY(a), 'label': ANY(a)},
{'score': ANY(a), 'label': ANY(a)},
] , )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = 'anton-l/wav2vec2-random-tiny-classifier'
SCREAMING_SNAKE_CASE = pipeline('audio-classification' , model=a)
SCREAMING_SNAKE_CASE = np.ones((8000,))
SCREAMING_SNAKE_CASE = audio_classifier(a , top_k=4)
SCREAMING_SNAKE_CASE = [
{'score': 0.08_42, 'label': 'no'},
{'score': 0.08_38, 'label': 'up'},
{'score': 0.08_37, 'label': 'go'},
{'score': 0.08_34, 'label': 'right'},
]
SCREAMING_SNAKE_CASE = [
{'score': 0.08_45, 'label': 'stop'},
{'score': 0.08_44, 'label': 'on'},
{'score': 0.08_41, 'label': 'right'},
{'score': 0.08_34, 'label': 'left'},
]
self.assertIn(nested_simplify(a , decimals=4) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
SCREAMING_SNAKE_CASE = {'array': np.ones((8000,)), 'sampling_rate': audio_classifier.feature_extractor.sampling_rate}
SCREAMING_SNAKE_CASE = audio_classifier(a , top_k=4)
self.assertIn(nested_simplify(a , decimals=4) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
import datasets
SCREAMING_SNAKE_CASE = 'superb/wav2vec2-base-superb-ks'
SCREAMING_SNAKE_CASE = pipeline('audio-classification' , model=a)
SCREAMING_SNAKE_CASE = datasets.load_dataset('anton-l/superb_dummy' , 'ks' , split='test')
SCREAMING_SNAKE_CASE = np.array(dataset[3]['speech'] , dtype=np.floataa)
SCREAMING_SNAKE_CASE = audio_classifier(a , top_k=4)
self.assertEqual(
nested_simplify(a , decimals=3) , [
{'score': 0.9_81, 'label': 'go'},
{'score': 0.0_07, 'label': 'up'},
{'score': 0.0_06, 'label': '_unknown_'},
{'score': 0.0_01, 'label': 'down'},
] , )
@require_tf
@unittest.skip('Audio classification is not implemented for TF')
def SCREAMING_SNAKE_CASE__ ( self) -> int:
pass
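    # Hedged usage sketch of the pipeline exercised above (checkpoint taken from
    # the slow test; waveform length and top_k are illustrative):
    #   from transformers import pipeline
    #   import numpy as np
    #   clf = pipeline('audio-classification' , model='superb/wav2vec2-base-superb-ks')
    #   clf(np.zeros((16000,) , dtype=np.float32) , top_k=2)
    #   # -> [{'score': ..., 'label': ...}, {'score': ..., 'label': ...}]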
| 73 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class _snake_case :
def __init__( self , a , a=13 , a=7 , a=True , a=True , a=False , a=True , a=99 , a=32 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length])
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices)
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , use_stable_embedding=a , )
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a) -> Any:
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a)
SCREAMING_SNAKE_CASE = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> str:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , )
SCREAMING_SNAKE_CASE = model(a , attention_mask=a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> int:
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> str:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=a)
model.to(a)
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size)
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1)
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1)
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1]).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3))
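    # The cache check above relies on causal-attention equivalence: running the
    # model over [x1..xn] in one pass must match running it over [x1..xk] and
    # then feeding [x(k+1)..xn] with past_key_values, because each position only
    # attends to earlier positions.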
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( A__ , A__ , A__ , unittest.TestCase ):
_lowercase : List[Any] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
_lowercase : str = (OpenLlamaForCausalLM,) if is_torch_available() else ()
_lowercase : List[str] = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : List[str] = False
_lowercase : Optional[int] = False
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self)
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=a , hidden_size=37)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a)
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'single_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a)
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a)
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
pass
@parameterized.expand([('linear',), ('dynamic',)])
def SCREAMING_SNAKE_CASE__ ( self , a) -> Dict:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = ids_tensor([1, 10] , config.vocab_size)
SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(42) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = OpenLlamaModel(a)
original_model.to(a)
original_model.eval()
SCREAMING_SNAKE_CASE = original_model(a).last_hidden_state
SCREAMING_SNAKE_CASE = original_model(a).last_hidden_state
set_seed(42) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE = OpenLlamaModel(a)
scaled_model.to(a)
scaled_model.eval()
SCREAMING_SNAKE_CASE = scaled_model(a).last_hidden_state
SCREAMING_SNAKE_CASE = scaled_model(a).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1E-5))
else:
self.assertFalse(torch.allclose(a , a , atol=1E-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1E-5))
| 73 | 1 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a_ : List[str] = '▁'
a_ : str = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class _snake_case ( A__ , unittest.TestCase ):
_lowercase : str = BigBirdTokenizer
_lowercase : Any = BigBirdTokenizerFast
_lowercase : Tuple = True
_lowercase : int = True
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
super().setUp()
SCREAMING_SNAKE_CASE = self.tokenizer_class(a , keep_accents=a)
tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '<unk>')
        self.assertEqual(vocab_keys[1] , '<s>')
        self.assertEqual(vocab_keys[-1] , '[MASK]')
        self.assertEqual(len(vocab_keys) , 1004)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE = tokenizer.tokenize(a)
SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(a)
self.assertListEqual(a , a)
SCREAMING_SNAKE_CASE = tokenizer.encode(a , add_special_tokens=a)
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = tokenizer.encode(a)
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(a)
self.assertListEqual(a , a)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = BigBirdTokenizer(a , keep_accents=a)
SCREAMING_SNAKE_CASE = tokenizer.tokenize('This is a test')
self.assertListEqual(a , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a) , [285, 46, 10, 170, 382] , )
SCREAMING_SNAKE_CASE = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(a)
self.assertListEqual(
a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(a)
self.assertListEqual(
a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = 'Hello World!'
SCREAMING_SNAKE_CASE = [65, 1_8536, 2260, 101, 66]
self.assertListEqual(a , self.big_tokenizer.encode(a))
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
# fmt: off
SCREAMING_SNAKE_CASE = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(a , self.big_tokenizer.encode(a))
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
SCREAMING_SNAKE_CASE = list(self.big_tokenizer.get_vocab().keys())[:10]
SCREAMING_SNAKE_CASE = ' '.join(a)
SCREAMING_SNAKE_CASE = self.big_tokenizer.encode_plus(a , return_tensors='pt' , return_token_type_ids=a)
SCREAMING_SNAKE_CASE = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=a)
SCREAMING_SNAKE_CASE = BigBirdConfig(attention_type='original_full')
SCREAMING_SNAKE_CASE = BigBirdModel(a)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**a)
model(**a)
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
SCREAMING_SNAKE_CASE = tokenizer.decode(tokenizer('Paris is the [MASK].').input_ids)
self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]')
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
# fmt: off
SCREAMING_SNAKE_CASE = {'input_ids': [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='google/bigbird-roberta-base' , revision='215c99f1600e06f83acce68422f2035b2b5c3510' , )
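    # Round trip these tests pin down (ids from the asserts above; [CLS] = 65
    # and [SEP] = 66 for this checkpoint):
    #   tok = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
    #   tok.encode('Hello World!')                        # [65, 18536, 2260, 101, 66]
    #   tok.decode(tok('Paris is the [MASK].').input_ids)  # '[CLS] Paris is the[MASK].[SEP]'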
| 73 |
from __future__ import annotations
solution = []
def is_safe(board , row , column):
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row , -1 , -1) , range(column , -1 , -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row , -1 , -1) , range(column , len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board , row):
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board , row , i):
            board[row][i] = 1
            solve(board , row + 1)
            board[row][i] = 0
    return False
def printboard(board):
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q' , end=' ')
            else:
                print('.' , end=' ')
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
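# Note: solve() appends the live `board` object, so once the search unwinds
# every entry of `solution` aliases the same cleared board; the count printed
# above is still correct because each solution is printed when it is found.
# Storing copies would preserve the boards: solution.append([row[:] for row in board])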
| 73 | 1 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class _snake_case :
def __init__( self , a , a=13 , a=7 , a=True , a=True , a=False , a=True , a=99 , a=32 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length])
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices)
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , use_stable_embedding=a , )
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a) -> Any:
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a)
SCREAMING_SNAKE_CASE = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> str:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , )
SCREAMING_SNAKE_CASE = model(a , attention_mask=a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> int:
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> str:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=a)
model.to(a)
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size)
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1)
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1)
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1]).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3))
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( A__ , A__ , A__ , unittest.TestCase ):
_lowercase : List[Any] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
_lowercase : str = (OpenLlamaForCausalLM,) if is_torch_available() else ()
_lowercase : List[str] = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : List[str] = False
_lowercase : Optional[int] = False
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self)
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=a , hidden_size=37)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a)
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'single_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a)
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a)
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
pass
@parameterized.expand([('linear',), ('dynamic',)])
def SCREAMING_SNAKE_CASE__ ( self , a) -> Dict:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = ids_tensor([1, 10] , config.vocab_size)
SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(42) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = OpenLlamaModel(a)
original_model.to(a)
original_model.eval()
SCREAMING_SNAKE_CASE = original_model(a).last_hidden_state
SCREAMING_SNAKE_CASE = original_model(a).last_hidden_state
set_seed(42) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE = OpenLlamaModel(a)
scaled_model.to(a)
scaled_model.eval()
SCREAMING_SNAKE_CASE = scaled_model(a).last_hidden_state
SCREAMING_SNAKE_CASE = scaled_model(a).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1E-5))
else:
self.assertFalse(torch.allclose(a , a , atol=1E-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1E-5))
| 73 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _snake_case ( A__ , A__ , unittest.TestCase ):
_lowercase : List[Any] = StableDiffusionDiffEditPipeline
_lowercase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
_lowercase : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
_lowercase : List[str] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowercase : List[str] = frozenset([] )
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , )
SCREAMING_SNAKE_CASE = DDIMInverseScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_zero=a , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
SCREAMING_SNAKE_CASE = CLIPTextModel(a)
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> List[Any]:
SCREAMING_SNAKE_CASE = floats_tensor((1, 16, 16) , rng=random.Random(a)).to(a)
SCREAMING_SNAKE_CASE = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a)).to(a)
if str(a).startswith('mps'):
SCREAMING_SNAKE_CASE = torch.manual_seed(a)
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
SCREAMING_SNAKE_CASE = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> List[Any]:
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(a)).to(a)
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1)[0]
SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(a)).convert('RGB')
if str(a).startswith('mps'):
SCREAMING_SNAKE_CASE = torch.manual_seed(a)
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
SCREAMING_SNAKE_CASE = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> Optional[int]:
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(a)).to(a)
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1)[0]
SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(a)).convert('RGB')
if str(a).startswith('mps'):
SCREAMING_SNAKE_CASE = torch.manual_seed(a)
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
SCREAMING_SNAKE_CASE = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
if not hasattr(self.pipeline_class , '_optional_components'):
return
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(a , a , a)
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a)
SCREAMING_SNAKE_CASE = pipe(**a)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a)
SCREAMING_SNAKE_CASE = self.pipeline_class.from_pretrained(a)
pipe_loaded.to(a)
pipe_loaded.set_progress_bar_config(disable=a)
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(a , a) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a)
SCREAMING_SNAKE_CASE = pipe_loaded(**a)[0]
SCREAMING_SNAKE_CASE = np.abs(output - output_loaded).max()
self.assertLess(a , 1E-4)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = 'cpu'
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = self.get_dummy_mask_inputs(a)
SCREAMING_SNAKE_CASE = pipe.generate_mask(**a)
SCREAMING_SNAKE_CASE = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16))
SCREAMING_SNAKE_CASE = np.array([0] * 9)
SCREAMING_SNAKE_CASE = np.abs(mask_slice.flatten() - expected_slice).max()
self.assertLessEqual(a , 1E-3)
self.assertEqual(mask[0, -3, -4] , 0)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = 'cpu'
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(a)
SCREAMING_SNAKE_CASE = pipe.invert(**a).images
SCREAMING_SNAKE_CASE = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
SCREAMING_SNAKE_CASE = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(a , 1E-3)
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=5E-3)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = 'cpu'
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler(**a)
SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler(**a)
SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(a)
SCREAMING_SNAKE_CASE = pipe.invert(**a).images
SCREAMING_SNAKE_CASE = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
SCREAMING_SNAKE_CASE = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(a , 1E-3)
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls) -> List[Any]:
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png')
SCREAMING_SNAKE_CASE = raw_image.convert('RGB').resize((768, 768))
SCREAMING_SNAKE_CASE = raw_image
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = torch.manual_seed(0)
SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=a , torch_dtype=torch.floataa)
SCREAMING_SNAKE_CASE = DDIMScheduler.from_config(pipe.scheduler.config)
SCREAMING_SNAKE_CASE = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = 'a bowl of fruit'
SCREAMING_SNAKE_CASE = 'a bowl of pears'
SCREAMING_SNAKE_CASE = pipe.generate_mask(
image=self.raw_image , source_prompt=a , target_prompt=a , generator=a , )
SCREAMING_SNAKE_CASE = pipe.invert(
prompt=a , image=self.raw_image , inpaint_strength=0.7 , generator=a).latents
SCREAMING_SNAKE_CASE = pipe(
prompt=a , mask_image=a , image_latents=a , generator=a , negative_prompt=a , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
SCREAMING_SNAKE_CASE = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png').resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = torch.manual_seed(0)
SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=a , torch_dtype=torch.floataa)
SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = 'a bowl of fruit'
SCREAMING_SNAKE_CASE = 'a bowl of pears'
SCREAMING_SNAKE_CASE = pipe.generate_mask(
image=self.raw_image , source_prompt=a , target_prompt=a , generator=a , )
SCREAMING_SNAKE_CASE = pipe.invert(
prompt=a , image=self.raw_image , inpaint_strength=0.7 , generator=a , num_inference_steps=25 , ).latents
SCREAMING_SNAKE_CASE = pipe(
prompt=a , mask_image=a , image_latents=a , generator=a , negative_prompt=a , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
SCREAMING_SNAKE_CASE = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png').resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
| 73 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
a_ : List[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_['tokenization_mluke'] = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], a_, module_spec=__spec__)
| 73 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : List[str] = logging.get_logger(__name__)
a_ : Any = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _snake_case ( A__ ):
_lowercase : Optional[int] = '''unispeech'''
def __init__( self , a=32 , a=768 , a=12 , a=12 , a=3072 , a="gelu" , a=0.1 , a=0.1 , a=0.1 , a=0.0 , a=0.0 , a=0.1 , a=0.1 , a=0.02 , a=1E-5 , a="group" , a="gelu" , a=(512, 512, 512, 512, 512, 512, 512) , a=(5, 2, 2, 2, 2, 2, 2) , a=(10, 3, 3, 3, 3, 2, 2) , a=False , a=128 , a=16 , a=False , a=True , a=0.05 , a=10 , a=2 , a=0.0 , a=10 , a=0 , a=320 , a=2 , a=0.1 , a=100 , a=256 , a=256 , a=0.1 , a="mean" , a=False , a=False , a=256 , a=80 , a=0 , a=1 , a=2 , a=0.5 , **a , ) -> Optional[int]:
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a)
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = feat_extract_norm
SCREAMING_SNAKE_CASE = feat_extract_activation
SCREAMING_SNAKE_CASE = list(a)
SCREAMING_SNAKE_CASE = list(a)
SCREAMING_SNAKE_CASE = list(a)
SCREAMING_SNAKE_CASE = conv_bias
SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE = len(self.conv_dim)
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = activation_dropout
SCREAMING_SNAKE_CASE = feat_proj_dropout
SCREAMING_SNAKE_CASE = final_dropout
SCREAMING_SNAKE_CASE = layerdrop
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_ctc_classes
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = do_stable_layer_norm
SCREAMING_SNAKE_CASE = use_weighted_layer_sum
SCREAMING_SNAKE_CASE = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE = apply_spec_augment
SCREAMING_SNAKE_CASE = mask_time_prob
SCREAMING_SNAKE_CASE = mask_time_length
SCREAMING_SNAKE_CASE = mask_time_min_masks
SCREAMING_SNAKE_CASE = mask_feature_prob
SCREAMING_SNAKE_CASE = mask_feature_length
SCREAMING_SNAKE_CASE = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE = num_codevectors_per_group
SCREAMING_SNAKE_CASE = num_codevector_groups
SCREAMING_SNAKE_CASE = contrastive_logits_temperature
SCREAMING_SNAKE_CASE = feat_quantizer_dropout
SCREAMING_SNAKE_CASE = num_negatives
SCREAMING_SNAKE_CASE = codevector_dim
SCREAMING_SNAKE_CASE = proj_codevector_dim
SCREAMING_SNAKE_CASE = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE = ctc_loss_reduction
SCREAMING_SNAKE_CASE = ctc_zero_infinity
# pretraining loss
SCREAMING_SNAKE_CASE = replace_prob
@property
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
return functools.reduce(operator.mul , self.conv_stride , 1)
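A quick standalone check of what the closing property computes: the product of the convolution strides, i.e. the feature extractor's total downsampling factor. This is a minimal sketch using the default strides shown above, independent of the class itself.
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # default `conv_stride` from the config above
downsampling_factor = functools.reduce(operator.mul, conv_stride, 1)
assert downsampling_factor == 320  # one output frame per 320 input samples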
| 73 | 1 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class _snake_case ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(a):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(a):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
for model_name in ["bert-base-cased", "bert-large-uncased"]:
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(a)
SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(a)
SCREAMING_SNAKE_CASE = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX)
@jax.jit
def eval(**a):
return model(**a)
eval(**a).block_until_ready()
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
for model_name in ["roberta-base", "roberta-large"]:
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(a)
SCREAMING_SNAKE_CASE = FlaxRobertaModel.from_pretrained(a)
SCREAMING_SNAKE_CASE = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX)
@jax.jit
def eval(**a):
return model(**a)
eval(**a).block_until_ready()
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
with self.assertRaisesRegex(
a , 'bert-base is not a local folder and is not a valid model identifier'):
SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained('bert-base')
def SCREAMING_SNAKE_CASE__ ( self) -> int:
with self.assertRaisesRegex(
a , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained(a , revision='aaaaaa')
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
with self.assertRaisesRegex(
a , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model')
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
with self.assertRaisesRegex(a , 'Use `from_pt=True` to load this model'):
SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only')
| 73 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
a_ : Optional[Any] = re.compile(R'\b(a|an|the)\b', re.UNICODE)
a_ : List[str] = None
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.')
parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.')
parser.add_argument(
'--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).')
parser.add_argument(
'--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.')
parser.add_argument(
'--na-prob-thresh' , '-t' , type=_UpperCAmelCase , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , )
parser.add_argument(
'--out-image-dir' , '-p' , metavar='out_images' , default=_UpperCAmelCase , help='Save precision-recall curves to directory.')
parser.add_argument('--verbose' , '-v' , action='store_true')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
SCREAMING_SNAKE_CASE = bool(qa['answers']['text'])
return qid_to_has_ans
def lowerCamelCase__ (_UpperCAmelCase):
def remove_articles(_UpperCAmelCase):
return ARTICLES_REGEX.sub(' ' , _UpperCAmelCase)
def white_space_fix(_UpperCAmelCase):
return " ".join(text.split())
def remove_punc(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(_UpperCAmelCase):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_UpperCAmelCase))))
def lowerCamelCase__ (_UpperCAmelCase):
if not s:
return []
return normalize_answer(_UpperCAmelCase).split()
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
return int(normalize_answer(_UpperCAmelCase) == normalize_answer(_UpperCAmelCase))
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = get_tokens(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = get_tokens(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = collections.Counter(_UpperCAmelCase) & collections.Counter(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = sum(common.values())
if len(_UpperCAmelCase) == 0 or len(_UpperCAmelCase) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
SCREAMING_SNAKE_CASE = 1.0 * num_same / len(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = 1.0 * num_same / len(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = (2 * precision * recall) / (precision + recall)
return fa
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
SCREAMING_SNAKE_CASE = qa['id']
SCREAMING_SNAKE_CASE = [t for t in qa['answers']['text'] if normalize_answer(_UpperCAmelCase)]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
SCREAMING_SNAKE_CASE = ['']
if qid not in preds:
print(F'''Missing prediction for {qid}''')
continue
SCREAMING_SNAKE_CASE = preds[qid]
# Take max over all gold answers
SCREAMING_SNAKE_CASE = max(compute_exact(_UpperCAmelCase , _UpperCAmelCase) for a in gold_answers)
SCREAMING_SNAKE_CASE = max(compute_fa(_UpperCAmelCase , _UpperCAmelCase) for a in gold_answers)
return exact_scores, fa_scores
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = {}
for qid, s in scores.items():
SCREAMING_SNAKE_CASE = na_probs[qid] > na_prob_thresh
if pred_na:
SCREAMING_SNAKE_CASE = float(not qid_to_has_ans[qid])
else:
SCREAMING_SNAKE_CASE = s
return new_scores
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None):
if not qid_list:
SCREAMING_SNAKE_CASE = len(_UpperCAmelCase)
return collections.OrderedDict(
[
('exact', 1_00.0 * sum(exact_scores.values()) / total),
('f1', 1_00.0 * sum(fa_scores.values()) / total),
('total', total),
])
else:
SCREAMING_SNAKE_CASE = len(_UpperCAmelCase)
return collections.OrderedDict(
[
('exact', 1_00.0 * sum(exact_scores[k] for k in qid_list) / total),
('f1', 1_00.0 * sum(fa_scores[k] for k in qid_list) / total),
('total', total),
])
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
for k in new_eval:
SCREAMING_SNAKE_CASE = new_eval[k]
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
plt.step(_UpperCAmelCase , _UpperCAmelCase , color='b' , alpha=0.2 , where='post')
plt.fill_between(_UpperCAmelCase , _UpperCAmelCase , step='post' , alpha=0.2 , color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.title(_UpperCAmelCase)
plt.savefig(_UpperCAmelCase)
plt.clf()
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None):
SCREAMING_SNAKE_CASE = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase: na_probs[k])
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1.0
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = [1.0]
SCREAMING_SNAKE_CASE = [0.0]
SCREAMING_SNAKE_CASE = 0.0
for i, qid in enumerate(_UpperCAmelCase):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
SCREAMING_SNAKE_CASE = true_pos / float(i + 1)
SCREAMING_SNAKE_CASE = true_pos / float(_UpperCAmelCase)
if i == len(_UpperCAmelCase) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(_UpperCAmelCase)
recalls.append(_UpperCAmelCase)
if out_image:
plot_pr_curve(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
return {"ap": 1_00.0 * avg_prec}
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
if out_image_dir and not os.path.exists(_UpperCAmelCase):
os.makedirs(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = sum(1 for v in qid_to_has_ans.values() if v)
if num_true_pos == 0:
return
SCREAMING_SNAKE_CASE = make_precision_recall_eval(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_exact.png') , title='Precision-Recall curve for Exact Match score' , )
SCREAMING_SNAKE_CASE = make_precision_recall_eval(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_f1.png') , title='Precision-Recall curve for F1 score' , )
SCREAMING_SNAKE_CASE = {k: float(_UpperCAmelCase) for k, v in qid_to_has_ans.items()}
SCREAMING_SNAKE_CASE = make_precision_recall_eval(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_oracle.png') , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_exact')
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_f1')
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_oracle')
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
if not qid_list:
return
SCREAMING_SNAKE_CASE = [na_probs[k] for k in qid_list]
SCREAMING_SNAKE_CASE = np.ones_like(_UpperCAmelCase) / float(len(_UpperCAmelCase))
plt.hist(_UpperCAmelCase , weights=_UpperCAmelCase , bins=20 , range=(0.0, 1.0))
plt.xlabel('Model probability of no-answer')
plt.ylabel('Proportion of dataset')
plt.title(F'''Histogram of no-answer probability: {name}''')
plt.savefig(os.path.join(_UpperCAmelCase , F'''na_prob_hist_{name}.png'''))
plt.clf()
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
SCREAMING_SNAKE_CASE = num_no_ans
SCREAMING_SNAKE_CASE = cur_score
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase: na_probs[k])
for i, qid in enumerate(_UpperCAmelCase):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
SCREAMING_SNAKE_CASE = scores[qid]
else:
if preds[qid]:
SCREAMING_SNAKE_CASE = -1
else:
SCREAMING_SNAKE_CASE = 0
cur_score += diff
if cur_score > best_score:
SCREAMING_SNAKE_CASE = cur_score
SCREAMING_SNAKE_CASE = na_probs[qid]
return 1_00.0 * best_score / len(_UpperCAmelCase), best_thresh
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = find_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = find_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = best_exact
SCREAMING_SNAKE_CASE = exact_thresh
SCREAMING_SNAKE_CASE = best_fa
SCREAMING_SNAKE_CASE = fa_thresh
def lowerCamelCase__ ():
with open(OPTS.data_file) as f:
SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = dataset_json['data']
with open(OPTS.pred_file) as f:
SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase)
if OPTS.na_prob_file:
with open(OPTS.na_prob_file) as f:
SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase)
else:
SCREAMING_SNAKE_CASE = {k: 0.0 for k in preds}
SCREAMING_SNAKE_CASE = make_qid_to_has_ans(_UpperCAmelCase) # maps qid to True/False
SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if v]
SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if not v]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_raw_scores(_UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = apply_no_ans_threshold(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.na_prob_thresh)
SCREAMING_SNAKE_CASE = apply_no_ans_threshold(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.na_prob_thresh)
SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase)
if has_ans_qids:
SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase , qid_list=_UpperCAmelCase)
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'HasAns')
if no_ans_qids:
SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase , qid_list=_UpperCAmelCase)
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'NoAns')
if OPTS.na_prob_file:
find_all_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir)
histogram_na_prob(_UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir , 'hasAns')
histogram_na_prob(_UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir , 'noAns')
if OPTS.out_file:
with open(OPTS.out_file , 'w') as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase)
else:
print(json.dumps(_UpperCAmelCase , indent=2))
if __name__ == "__main__":
a_ : Any = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
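To make the scoring above concrete, here is a minimal, self-contained walk-through of the answer normalization and token-level F1 on hand-picked strings; the names `normalize`, `gold`, and `pred` are illustrative, not part of the script.
import collections
import re
import string

ARTICLES = re.compile(r'\b(a|an|the)\b', re.UNICODE)

def normalize(text):
    # lower-case, strip punctuation, drop articles, collapse whitespace
    text = ''.join(ch for ch in text.lower() if ch not in set(string.punctuation))
    return ' '.join(ARTICLES.sub(' ', text).split())

gold = normalize('The Eiffel Tower').split()        # ['eiffel', 'tower']
pred = normalize('an Eiffel tower, Paris').split()  # ['eiffel', 'tower', 'paris']
common = collections.Counter(gold) & collections.Counter(pred)
num_same = sum(common.values())                     # 2 shared tokens
precision, recall = num_same / len(pred), num_same / len(gold)
assert round(2 * precision * recall / (precision + recall), 4) == 0.8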
| 73 | 1 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class _snake_case ( unittest.TestCase ):
_lowercase : Optional[int] = inspect.getfile(accelerate.test_utils )
_lowercase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
_lowercase : int = ['''accelerate''', '''launch''']
_lowercase : int = Path.home() / '''.cache/huggingface/accelerate'''
_lowercase : int = '''default_config.yaml'''
_lowercase : List[Any] = config_folder / config_file
_lowercase : str = config_folder / '''_default_config.yaml'''
_lowercase : str = Path('''tests/test_configs''' )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls) -> List[str]:
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path)
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls) -> str:
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy())
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
for config in sorted(self.test_config_path.glob('**/*.yaml')):
with self.subTest(config_file=a):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(a), self.test_file_path] , env=os.environ.copy())
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy())
class _snake_case ( unittest.TestCase ):
_lowercase : Optional[Any] = '''test-tpu'''
_lowercase : Optional[Any] = '''us-central1-a'''
_lowercase : List[str] = '''ls'''
_lowercase : Union[str, Any] = ['''accelerate''', '''tpu-config''']
_lowercase : Tuple = '''cd /usr/share'''
_lowercase : Optional[Any] = '''tests/test_samples/test_command_file.sh'''
_lowercase : List[Any] = '''Running gcloud compute tpus tpu-vm ssh'''
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , a , )
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , a , )
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=a)
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , a , )
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , a , )
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , a , )
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , a , )
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , a , )
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , a , )
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] , return_stdout=a , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , a , )
| 73 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
a_ : Dict = logging.get_logger(__name__)
class _snake_case ( A__ ):
def __init__( self , *a , **a) -> None:
warnings.warn(
'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use GLPNImageProcessor instead.' , a , )
super().__init__(*a , **a)
| 73 | 1 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def lowerCamelCase__ (_UpperCAmelCase):
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name)
a_ : Any = '\ntransformers can only be used from the commandline to convert TensorFlow models to PyTorch. In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class _snake_case ( A__ ):
@staticmethod
def SCREAMING_SNAKE_CASE__ ( a) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = parser.add_parser(
'convert' , help='CLI tool to convert models from original author checkpoints to Transformers PyTorch checkpoints.' , )
train_parser.add_argument('--model_type' , type=a , required=a , help='Model\'s type.')
train_parser.add_argument(
'--tf_checkpoint' , type=a , required=a , help='TensorFlow checkpoint path or folder.')
train_parser.add_argument(
'--pytorch_dump_output' , type=a , required=a , help='Path to the PyTorch saved model output.')
train_parser.add_argument('--config' , type=a , default='' , help='Configuration file path or folder.')
train_parser.add_argument(
'--finetuning_task_name' , type=a , default=a , help='Optional fine-tuning task name if the TF model was a finetuned model.' , )
train_parser.set_defaults(func=a)
def __init__( self , a , a , a , a , a , *a , ) -> Any:
SCREAMING_SNAKE_CASE = logging.get_logger('transformers-cli/converting')
self._logger.info(f'''Loading model {model_type}''')
SCREAMING_SNAKE_CASE = model_type
SCREAMING_SNAKE_CASE = tf_checkpoint
SCREAMING_SNAKE_CASE = pytorch_dump_output
SCREAMING_SNAKE_CASE = config
SCREAMING_SNAKE_CASE = finetuning_task_name
def SCREAMING_SNAKE_CASE__ ( self) -> str:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(a)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(a)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(a)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(a)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(a)
if "ckpt" in self._tf_checkpoint.lower():
SCREAMING_SNAKE_CASE = self._tf_checkpoint
SCREAMING_SNAKE_CASE = ''
else:
SCREAMING_SNAKE_CASE = self._tf_checkpoint
SCREAMING_SNAKE_CASE = ''
convert_transfo_xl_checkpoint_to_pytorch(
a , self._config , self._pytorch_dump_output , a)
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(a)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(a)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name)
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
else:
raise ValueError(
'--model_type should be one of [albert, bert, funnel, t5, gpt, transfo_xl, gpt2, xlnet, xlm, lxmert, rembert]')
| 73 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _snake_case ( unittest.TestCase , A__ ):
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = load_tool('text-classification')
self.tool.setup()
SCREAMING_SNAKE_CASE = load_tool('text-classification' , remote=a)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = self.tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(a , 'positive')
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.remote_tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(a , 'positive')
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(a , 'positive')
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(a , 'positive')
| 73 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class lowerCamelCase_ ( lowerCamelCase ):
a__ = '''vivit'''
def __init__( self , __lowerCAmelCase=2_2_4 , __lowerCAmelCase=3_2 , __lowerCAmelCase=[2, 1_6, 1_6] , __lowerCAmelCase=3 , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=1_2 , __lowerCAmelCase=1_2 , __lowerCAmelCase=3_0_7_2 , __lowerCAmelCase="gelu_fast" , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-06 , __lowerCAmelCase=True , **__lowerCAmelCase , ):
"""simple docstring"""
__magic_name__ :Dict = hidden_size
__magic_name__ :Tuple = num_hidden_layers
__magic_name__ :Dict = num_attention_heads
__magic_name__ :int = intermediate_size
__magic_name__ :Optional[int] = hidden_act
__magic_name__ :str = hidden_dropout_prob
__magic_name__ :int = attention_probs_dropout_prob
__magic_name__ :Dict = initializer_range
__magic_name__ :Optional[int] = layer_norm_eps
__magic_name__ :Optional[int] = image_size
__magic_name__ :Optional[int] = num_frames
__magic_name__ :List[Any] = tubelet_size
__magic_name__ :List[str] = num_channels
__magic_name__ :Optional[int] = qkv_bias
super().__init__(**__lowerCAmelCase )
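As a rough sanity check of the defaults above: with 32 frames, 224x224 inputs, and a [2, 16, 16] tubelet, the video is cut into 16 temporal slices of 14x14 spatial patches. The token count below follows from the tubelet embedding itself, not from any method in this snippet.
image_size, num_frames = 224, 32
t, h, w = 2, 16, 16  # default `tubelet_size`
num_patches = (num_frames // t) * (image_size // h) * (image_size // w)
assert num_patches == 3136  # 16 * 14 * 14 video tokens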
| 0 |
import sys
import turtle
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1])
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1])
my_pen.goto(vertexa[0] , vertexa[1])
my_pen.goto(vertexa[0] , vertexa[1])
if depth == 0:
return
triangle(_UpperCAmelCase , get_mid(_UpperCAmelCase , _UpperCAmelCase) , get_mid(_UpperCAmelCase , _UpperCAmelCase) , depth - 1)
triangle(_UpperCAmelCase , get_mid(_UpperCAmelCase , _UpperCAmelCase) , get_mid(_UpperCAmelCase , _UpperCAmelCase) , depth - 1)
triangle(_UpperCAmelCase , get_mid(_UpperCAmelCase , _UpperCAmelCase) , get_mid(_UpperCAmelCase , _UpperCAmelCase) , depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
a_ : Any = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
a_ : str = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
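Two small checks of the building blocks above, without the turtle dependency: the midpoint helper, and the number of triangles the recursion draws (every call draws one triangle and, while depth > 0, recurses into three halves). `triangles_drawn` is an illustrative name, not part of the script.
def get_mid(pa, pb):
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2

assert get_mid((-175, -125), (0, 175)) == (-87.5, 25.0)

def triangles_drawn(depth):
    # one triangle per call, three recursive calls while depth > 0
    return 1 if depth == 0 else 1 + 3 * triangles_drawn(depth - 1)

assert triangles_drawn(3) == 40  # (3**4 - 1) // 2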
| 73 | 0 |
from math import pow, sqrt
def _A ( *_lowercase ) -> bool:
"""simple docstring"""
__UpperCamelCase = len(_lowercase ) > 0 and all(value > 0.0 for value in values )
return result
def _A ( _lowercase , _lowercase ) -> float | ValueError:
"""simple docstring"""
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_lowercase , _lowercase )
else ValueError('Input Error: Molar mass values must be greater than 0.' )
)
def _A ( _lowercase , _lowercase , _lowercase ) -> float | ValueError:
"""simple docstring"""
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_lowercase , _lowercase , _lowercase )
else ValueError(
'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def _A ( _lowercase , _lowercase , _lowercase ) -> float | ValueError:
"""simple docstring"""
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_lowercase , _lowercase , _lowercase )
else ValueError(
'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def _A ( _lowercase , _lowercase , _lowercase ) -> float | ValueError:
"""simple docstring"""
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(_lowercase , _lowercase , _lowercase )
else ValueError(
'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def _A ( _lowercase , _lowercase , _lowercase ) -> float | ValueError:
"""simple docstring"""
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(_lowercase , _lowercase , _lowercase )
else ValueError(
'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
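The snippet encodes Graham's law of effusion, rate_1 / rate_2 = sqrt(M_2 / M_1). Below is a worked example independent of the obfuscated helpers above, using approximate textbook molar masses: hydrogen effuses roughly four times faster than oxygen.
from math import sqrt

molar_mass_h2, molar_mass_o2 = 2.016, 32.00       # g/mol, approximate
rate_ratio = sqrt(molar_mass_o2 / molar_mass_h2)  # rate(H2) / rate(O2)
assert round(rate_ratio, 3) == 3.984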
| 1 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
a_ : Any = 'true'
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=16):
set_seed(42)
SCREAMING_SNAKE_CASE = RegressionModel()
SCREAMING_SNAKE_CASE = deepcopy(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = RegressionDataset(length=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase)
model.to(accelerator.device)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase)
return model, ddp_model, dataloader
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=False):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased')
SCREAMING_SNAKE_CASE = load_dataset('glue' , 'mrpc' , split='validation')
def tokenize_function(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase)
return outputs
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE = dataset.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column('label' , 'labels')
def collate_fn(_UpperCAmelCase):
if use_longest:
return tokenizer.pad(_UpperCAmelCase , padding='longest' , return_tensors='pt')
return tokenizer.pad(_UpperCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt')
return DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=16)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = Accelerator(dispatch_batches=_UpperCAmelCase , split_batches=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = get_dataloader(_UpperCAmelCase , not dispatch_batches)
SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase)
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = []
for batch in dataloader:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = batch.values()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((logit, target))
logits_and_targets.append((logit, target))
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = [], []
for logit, targ in logits_and_targets:
logits.append(_UpperCAmelCase)
targs.append(_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torch.cat(_UpperCAmelCase), torch.cat(_UpperCAmelCase)
return logits, targs
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=16):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_basic_setup(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = generate_predictions(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
assert (
len(_UpperCAmelCase) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_UpperCAmelCase)}'''
def lowerCamelCase__ (_UpperCAmelCase = False , _UpperCAmelCase = False):
SCREAMING_SNAKE_CASE = evaluate.load('glue' , 'mrpc')
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_mrpc_setup(_UpperCAmelCase , _UpperCAmelCase)
# First do baseline
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['no']
model.to(_UpperCAmelCase)
model.eval()
for batch in dataloader:
batch.to(_UpperCAmelCase)
with torch.inference_mode():
SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1)
metric.add_batch(predictions=_UpperCAmelCase , references=batch['labels'])
SCREAMING_SNAKE_CASE = metric.compute()
# Then do distributed
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1)
SCREAMING_SNAKE_CASE = batch['labels']
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((preds, references))
metric.add_batch(predictions=_UpperCAmelCase , references=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key]), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**')
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''')
test_mrpc(_UpperCAmelCase , _UpperCAmelCase)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**')
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase)
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''')
test_torch_metrics(_UpperCAmelCase , 99)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**')
SCREAMING_SNAKE_CASE = Accelerator()
test_torch_metrics(_UpperCAmelCase , 512)
accelerator.state._reset_state()
def lowerCamelCase__ (_UpperCAmelCase):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 73 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
UpperCAmelCase_ = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
UpperCAmelCase_ = """</w>"""
UpperCAmelCase_ = """@@ """
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] ) -> List[str]:
    # collect the set of adjacent symbol pairs in a word (used to pick BPE merges)
    _A = set()
    prev_char = _snake_case[0]
    for char in _snake_case[1:]:
        _A.add((prev_char, char) )
        prev_char = char
    return _A
# Speech2Text2 has no max input length
UpperCAmelCase_ = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : Dict = VOCAB_FILES_NAMES
a__ : str = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]="<s>" , __lowerCAmelCase : Tuple="<pad>" , __lowerCAmelCase : Optional[Any]="</s>" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : str , ) -> Dict:
super().__init__(
unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , **__lowerCAmelCase , )
_A = do_lower_case
with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle:
_A = json.load(__lowerCAmelCase )
_A = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
_A = None
_A = None
else:
with open(__lowerCAmelCase , encoding='''utf-8''' ) as merges_handle:
_A = merges_handle.read().split('''\n''' )[:-1]
_A = [tuple(merge.split()[:2] ) for merge in merges]
_A = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
_A = {}
@property
def snake_case_ ( self : List[str] ) -> int:
return len(self.decoder )
def snake_case_ ( self : Dict ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Any ) -> Union[str, Any]:
_A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_A = get_pairs(__lowerCAmelCase )
if not pairs:
return token
while True:
_A = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A = bigram
_A = []
_A = 0
while i < len(__lowerCAmelCase ):
try:
_A = word.index(__lowerCAmelCase , __lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_A = j
if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A = tuple(__lowerCAmelCase )
_A = new_word
if len(__lowerCAmelCase ) == 1:
break
else:
_A = get_pairs(__lowerCAmelCase )
_A = ''' '''.join(__lowerCAmelCase )
if word == "\n " + BPE_TOKEN_MERGES:
_A = '''\n''' + BPE_TOKEN_MERGES
if word.endswith(__lowerCAmelCase ):
_A = word.replace(__lowerCAmelCase , '''''' )
_A = word.replace(''' ''' , __lowerCAmelCase )
_A = word
return word
def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : Tuple ) -> Optional[int]:
if self.bpe_ranks is None:
raise ValueError(
'''This tokenizer was instantiated without a `merges.txt` file, so'''
''' that it can only be used for decoding, not for encoding.'''
''' Make sure to provide a `merges.txt` file at instantiation to enable '''
'''encoding.''' )
if self.do_lower_case:
_A = text.lower()
_A = text.split()
_A = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(''' ''' ) ) )
return split_tokens
def snake_case_ ( self : List[Any] , __lowerCAmelCase : str ) -> int:
return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) )
def snake_case_ ( self : str , __lowerCAmelCase : int ) -> str:
_A = self.decoder.get(__lowerCAmelCase , self.unk_token )
return result
def snake_case_ ( self : List[str] , __lowerCAmelCase : List[str] ) -> str:
_A = ''' '''.join(__lowerCAmelCase )
# make sure @@ tokens are concatenated
_A = ''''''.join(string.split(__lowerCAmelCase ) )
return string
def snake_case_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_A = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + '''\n''' )
_A = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
_A = token_index
writer.write(''' '''.join(__lowerCAmelCase ) + '''\n''' )
index += 1
return (vocab_file, merges_file)
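A quick illustration of the pair-extraction step at the top of this snippet: BPE repeatedly merges the highest-ranked adjacent pair, so the first thing `bpe` needs is the set of adjacent symbol pairs in a word. This calls the module-level helper defined above, with the `</w>` end-of-word marker from `BPE_TOKEN_MERGES`.
word = ('l', 'o', 'w', 'e', 'r</w>')  # symbols of 'lower' plus the end-of-word marker
pairs = SCREAMING_SNAKE_CASE_(word)
assert pairs == {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r</w>')}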
| 2 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
a_ : List[str] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_['tokenization_gpt_swa'] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], a_, module_spec=__spec__)
| 73 | 0 |
'''simple docstring'''
def A_( number : int):
    if not isinstance(number , int):
        UpperCamelCase = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(UpperCamelCase)
    if number < 0:
        return False
    number_square = number * number  # an automorphic number's square ends in the number itself
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
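A few spot checks of the automorphic-number test above (the square must end in the number itself), using the obfuscated name `A_` from the definition:
assert A_(5)      # 5**2 = 25 ends in 5
assert A_(76)     # 76**2 = 5776 ends in 76
assert not A_(7)  # 7**2 = 49 does not end in 7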
| 3 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
a_ : str = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
def lowerCamelCase__ (_UpperCAmelCase=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=A__ ) )
class _snake_case ( A__ ):
_lowercase : Optional[Any] = None
_lowercase : Optional[Any] = None
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Optional[Any]:
with TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE = dataset_module_factory(a , cache_dir=a)
SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , dataset=a)
SCREAMING_SNAKE_CASE = builder_cls(
cache_dir=a , config_name=a , hash=dataset_module.hash , )
SCREAMING_SNAKE_CASE = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=a).replace(os.sep , '/'),
config.DATASET_INFO_FILENAME,
])
SCREAMING_SNAKE_CASE = cached_path(a , cache_dir=a)
self.assertTrue(os.path.exists(a))
@pytest.mark.integration
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('test_hf_gcp') / 'test_wikipedia_simple'
SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path)
SCREAMING_SNAKE_CASE = builder_cls(
cache_dir=_UpperCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
SCREAMING_SNAKE_CASE = None
builder_instance.download_and_prepare()
SCREAMING_SNAKE_CASE = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , dataset=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = builder_cls(
cache_dir=_UpperCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
SCREAMING_SNAKE_CASE = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_UpperCAmelCase , _UpperCAmelCase)
assert "train" in ds
assert isinstance(ds['train'] , _UpperCAmelCase)
assert next(iter(ds['train']))
| 73 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__UpperCamelCase : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase['''tokenization_gpt_swa'''] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], __UpperCamelCase, module_spec=__spec__)
| 4 |
from __future__ import annotations
def lowerCamelCase__ (_UpperCAmelCase):
    n = _UpperCAmelCase
    i = 2  # smallest candidate factor, trial division
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)  # whatever remains above sqrt is prime
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
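A couple of spot checks for the trial-division factorization above, using the obfuscated name `lowerCamelCase__` from the definition:
assert lowerCamelCase__(12) == [2, 2, 3]
assert lowerCamelCase__(97) == [97]  # a prime factors as itself
assert lowerCamelCase__(360) == [2, 2, 2, 3, 3, 5]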
| 73 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
_lowercase = TypeVar("""T""")
_lowercase = TypeVar("""U""")
class UpperCAmelCase_ ( Generic[T, U] ):
'''simple docstring'''
def __init__( self , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = key
_lowerCAmelCase = val
_lowerCAmelCase = None
_lowerCAmelCase = None
def __repr__( self ):
"""simple docstring"""
return (
F'Node: key: {self.key}, val: {self.val}, '
F'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class UpperCAmelCase_ ( Generic[T, U] ):
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
        self.head = DoubleLinkedListNode(None , None )
        self.rear = DoubleLinkedListNode(None , None )
        self.head.next , self.rear.prev = self.rear, self.head
def __repr__( self ):
"""simple docstring"""
_lowerCAmelCase = ["""DoubleLinkedList"""]
_lowerCAmelCase = self.head
while node.next is not None:
rep.append(str(_lowercase ) )
_lowerCAmelCase = node.next
rep.append(str(self.rear ) )
return ",\n ".join(_lowercase )
    def add( self , node ):
        """simple docstring"""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear
    def remove( self , node ):
        """simple docstring"""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache ( Generic[T, U] ):
'''simple docstring'''
    decorator_function_to_instance_map : dict[Callable[[T], U], LRUCache[T, U]] = {}
    def __init__( self , capacity ):
        """simple docstring"""
        self.list = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache = {}
def __repr__( self ):
"""simple docstring"""
return (
F'CacheInfo(hits={self.hits}, misses={self.miss}, '
F'capacity={self.capacity}, current size={self.num_keys})'
)
    def __contains__( self , key ):
        """simple docstring"""
        return key in self.cache
    def get( self , key ):
        """simple docstring"""
        if key in self.cache:
            self.hits += 1
            value_node = self.cache[key]
            node = self.list.remove(self.cache[key] )
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node )
            return node.val
        self.miss += 1
        return None
    def put( self , key , value ):
        """simple docstring"""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node ) is not None
                ) # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key , value )
            self.list.add(self.cache[key] )
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key] )
            assert node is not None # node guaranteed to be in list
            node.val = value
            self.list.add(node )
    @classmethod
    def decorator( cls , size = 128 ):
        """simple docstring"""
        def cache_decorator_inner(func ) -> Callable[..., U]:
            def cache_decorator_wrapper(*args ) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size )
                result = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    result = func(*args )
                    cls.decorator_function_to_instance_map[func].put(args[0] , result )
                return result
            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]
            setattr(cache_decorator_wrapper , """cache_info""" , cache_info ) # noqa: B010
            return cache_decorator_wrapper
        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
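# Hedged usage sketch (added for illustration, not in the original file): memoize a
# recursive Fibonacci with the class-level decorator restored above.
# @LRUCache.decorator(100)
# def fib(n):
#     if n in (1, 2):
#         return 1
#     return fib(n - 1) + fib(n - 2)
# fib(25)                     # repeated subproblems now hit the cache
# print(fib.cache_info())     # CacheInfo(hits=..., misses=..., capacity=100, ...)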
| 5 |
import math
import os
import sys
def read_file_binary(file_path):
    # reads the given file as bytes and returns them as one long bit string
    result = ''
    try:
        with open(file_path , 'rb') as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = F'''{dat:08b}'''
            result += curr_byte
        return result
    except OSError:
        print('File not accessible')
        sys.exit()
def add_key_to_lexicon(lexicon , curr_string , index , last_match_id):
    # replaces curr_string with curr_string + "0" / curr_string + "1" entries,
    # widening existing keys whenever the index crosses a power of two
    lexicon.pop(curr_string)
    lexicon[curr_string + '0'] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = '0' + lexicon[curr_key]
    lexicon[curr_string + '1'] = bin(index)[2:]
def compress_data(data_bits):
    # Lempel-Ziv-style compression over a bit string
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon , curr_string , index , last_match_id)
        index += 1
        curr_string = ''
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path , compressed):
    # prepend the original file length as a self-delimiting header
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path , to_write):
    byte_length = 8
    try:
        with open(file_path , 'wb') as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write) , byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('10000000')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem , 2).to_bytes(1 , byteorder='big'))
    except OSError:
        print('File not accessible')
        sys.exit()
def compress(source_path , destination_path):
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path , compressed)
    write_file_binary(destination_path , compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
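# Hedged usage sketch (shell, added for illustration; file names are placeholders):
#   python lempel_ziv.py input.bin output.lzw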
| 73 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
_lowerCamelCase = None
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
_lowerCamelCase = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
_lowerCamelCase = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
_lowerCamelCase = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = ["input_ids", "attention_mask"]
lowerCamelCase_ = MBartTokenizer
lowerCamelCase_ = []
lowerCamelCase_ = []
def __init__( self :int , __A :Optional[Any]=None , __A :Union[str, Any]=None , __A :Optional[Any]="<s>" , __A :int="</s>" , __A :Tuple="</s>" , __A :int="<s>" , __A :str="<unk>" , __A :str="<pad>" , __A :Dict="<mask>" , __A :Optional[Any]=None , __A :int=None , __A :Tuple=None , **__A :Optional[int] , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
vocab_file=__A , tokenizer_file=__A , bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , src_lang=__A , tgt_lang=__A , additional_special_tokens=__A , **__A , )
SCREAMING_SNAKE_CASE__ = vocab_file
SCREAMING_SNAKE_CASE__ = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
SCREAMING_SNAKE_CASE__ = {
lang_code: self.convert_tokens_to_ids(__A ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
SCREAMING_SNAKE_CASE__ = src_lang if src_lang is not None else """en_XX"""
SCREAMING_SNAKE_CASE__ = self.convert_tokens_to_ids(self._src_lang )
SCREAMING_SNAKE_CASE__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _snake_case ( self :Union[str, Any] ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _snake_case ( self :Tuple , __A :str ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
    def _snake_case ( self :Any , token_ids_0 :List[int] , token_ids_1 :Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _snake_case ( self :Any , token_ids_0 :List[int] , token_ids_1 :Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
def _snake_case ( self :List[str] , __A :Dict , __A :str , __A :Optional[str] , __A :Optional[str] , **__A :int ) -> Dict:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
SCREAMING_SNAKE_CASE__ = src_lang
SCREAMING_SNAKE_CASE__ = self(__A , add_special_tokens=__A , return_tensors=__A , **__A )
SCREAMING_SNAKE_CASE__ = self.convert_tokens_to_ids(__A )
SCREAMING_SNAKE_CASE__ = tgt_lang_id
return inputs
def _snake_case ( self :Optional[int] , __A :List[str] , __A :str = "en_XX" , __A :Optional[List[str]] = None , __A :str = "ro_RO" , **__A :Optional[int] , ) -> BatchEncoding:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = src_lang
SCREAMING_SNAKE_CASE__ = tgt_lang
        return super().prepare_seq2seq_batch(__A , __A , **__A )
def _snake_case ( self :List[str] ) -> Union[str, Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self :Optional[int] ) -> List[Any]:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self :Any , __A :Tuple ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.convert_tokens_to_ids(__A )
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = [self.eos_token_id, self.cur_lang_code]
SCREAMING_SNAKE_CASE__ = self.convert_ids_to_tokens(self.prefix_tokens )
SCREAMING_SNAKE_CASE__ = self.convert_ids_to_tokens(self.suffix_tokens )
SCREAMING_SNAKE_CASE__ = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _snake_case ( self :Any , __A :str ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.convert_tokens_to_ids(__A )
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = [self.eos_token_id, self.cur_lang_code]
SCREAMING_SNAKE_CASE__ = self.convert_ids_to_tokens(self.prefix_tokens )
SCREAMING_SNAKE_CASE__ = self.convert_ids_to_tokens(self.suffix_tokens )
SCREAMING_SNAKE_CASE__ = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _snake_case ( self :str , __A :str , __A :Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
SCREAMING_SNAKE_CASE__ = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file , __A )
return (out_vocab_file,) | 6 |
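# Hedged usage sketch (added for illustration; assumes the transformers package):
# from transformers import MBartTokenizerFast
# tok = MBartTokenizerFast.from_pretrained(
#     "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
# )
# batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# Input ids end with [..., </s>, en_XX]: set_src_lang_special_tokens puts the language
# code in the suffix, which is exactly what the TemplateProcessing above encodes.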
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))
def softmax(_outputs):
    maxes = np.max(_outputs , axis=-1 , keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True)
class ClassificationFunction ( ExplicitEnum ):
    SIGMOID = '''sigmoid'''
    SOFTMAX = '''softmax'''
    NONE = '''none'''
@add_end_docstrings(
    PIPELINE_INIT_ARGS , R'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class _snake_case ( Pipeline ):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
def __init__( self , **a) -> Optional[Any]:
super().__init__(**a)
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)
def SCREAMING_SNAKE_CASE__ ( self , a=None , a=None , a="" , **a) -> Tuple:
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
SCREAMING_SNAKE_CASE = tokenizer_kwargs
SCREAMING_SNAKE_CASE = {}
if hasattr(self.model.config , 'return_all_scores') and return_all_scores is None:
SCREAMING_SNAKE_CASE = self.model.config.return_all_scores
if isinstance(a , a) or top_k is None:
SCREAMING_SNAKE_CASE = top_k
SCREAMING_SNAKE_CASE = False
elif return_all_scores is not None:
warnings.warn(
'`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'
' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , a , )
if return_all_scores:
SCREAMING_SNAKE_CASE = None
else:
SCREAMING_SNAKE_CASE = 1
if isinstance(a , a):
SCREAMING_SNAKE_CASE = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
SCREAMING_SNAKE_CASE = function_to_apply
return preprocess_params, {}, postprocess_params
    def __call__( self , *args , **kwargs) -> Optional[int]:
        result = super().__call__(*args , **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = 'top_k' not in kwargs
        if isinstance(args[0] , str) and _legacy:
            # This pipeline is odd and returns a list when a single item is run
            return [result]
        else:
            return result
    def SCREAMING_SNAKE_CASE__ ( self , inputs , **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs , dict):
            return self.tokenizer(**inputs , return_tensors=return_tensors , **tokenizer_kwargs)
        elif isinstance(inputs , list) and len(inputs) == 1 and isinstance(inputs[0] , list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=return_tensors , **tokenizer_kwargs)
        elif isinstance(inputs , list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.')
        return self.tokenizer(inputs , return_tensors=return_tensors , **tokenizer_kwargs)
def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[Any]:
return self.model(**a)
    def SCREAMING_SNAKE_CASE__ ( self , model_outputs , function_to_apply=None , top_k=1 , _legacy=True) -> Any:
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config , 'function_to_apply') and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs['logits'][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''')
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {'label': self.model.config.id2label[i], 'score': score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"] , reverse=True)
        if top_k is not None:
            dict_scores = dict_scores[:top_k]
return dict_scores
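# Hedged usage sketch (added for illustration): this class backs the
# "text-classification" pipeline task in transformers; the model name is illustrative.
# from transformers import pipeline
# clf = pipeline("text-classification",
#                model="distilbert-base-uncased-finetuned-sst-2-english")
# clf("I love this movie!")               # [{'label': 'POSITIVE', 'score': ...}]
# clf("I love this movie!", top_k=None)   # scores for every label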
| 73 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a = logging.get_logger(__name__) # pylint: disable=invalid-name
a = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width(height , width , scale_factor=8 ):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image(pil_image , w=512 , h=512 ):
    '''simple docstring'''
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert('RGB' ) )
    arr = arr.astype(np.float32 ) / 127.5 - 1
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
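# Quick worked example for the helper above: with the default scale_factor=8, a
# 768x768 request maps to a (96, 96) latent grid (768 // 64 = 12 cells per side,
# rounded up on non-multiples, then 12 * 8 = 96).
# assert downscale_height_and_width(768, 768) == (96, 96)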
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Any , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : DDPMScheduler , _UpperCAmelCase : VQModel , ):
super().__init__()
self.register_modules(
unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , movq=_UpperCAmelCase , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : Dict ):
# get the original timestep using init_timestep
_A = min(int(num_inference_steps * strength ) , _UpperCAmelCase )
_A = max(num_inference_steps - init_timestep , 0 )
_A = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int]=None ):
if not isinstance(_UpperCAmelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_UpperCAmelCase )}''' )
_A = image.to(device=_UpperCAmelCase , dtype=_UpperCAmelCase )
_A = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_A = image
else:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(_UpperCAmelCase )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_A = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCAmelCase )
]
_A = torch.cat(_UpperCAmelCase , dim=0 )
else:
_A = self.movq.encode(_UpperCAmelCase ).latent_dist.sample(_UpperCAmelCase )
_A = self.movq.config.scaling_factor * init_latents
_A = torch.cat([init_latents] , dim=0 )
_A = init_latents.shape
_A = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase )
# get latents
_A = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_A = init_latents
return latents
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Tuple=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
_A = torch.device(F'''cuda:{gpu_id}''' )
_A = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCAmelCase , _UpperCAmelCase )
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : List[str]=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
_A = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_UpperCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_A = None
for cpu_offloaded_model in [self.unet, self.movq]:
_A , _A = cpu_offload_with_hook(_UpperCAmelCase , _UpperCAmelCase , prev_module_hook=_UpperCAmelCase )
# We'll offload the last model manually.
_A = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase_ ( self : Tuple ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_UpperCAmelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_UpperCAmelCase )
def __call__( self : Any , _UpperCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , _UpperCAmelCase : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , _UpperCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 100 , _UpperCAmelCase : float = 4.0 , _UpperCAmelCase : float = 0.3 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , ):
_A = self._execution_device
_A = guidance_scale > 1.0
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_A = torch.cat(_UpperCAmelCase , dim=0 )
_A = image_embeds.shape[0]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_A = torch.cat(_UpperCAmelCase , dim=0 )
if do_classifier_free_guidance:
_A = image_embeds.repeat_interleave(_UpperCAmelCase , dim=0 )
_A = negative_image_embeds.repeat_interleave(_UpperCAmelCase , dim=0 )
_A = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCAmelCase )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_A = [image]
if not all(isinstance(_UpperCAmelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F'''Input is in incorrect format: {[type(_UpperCAmelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor''' )
_A = torch.cat([prepare_image(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for i in image] , dim=0 )
_A = image.to(dtype=image_embeds.dtype , device=_UpperCAmelCase )
_A = self.movq.encode(_UpperCAmelCase )['latents']
_A = latents.repeat_interleave(_UpperCAmelCase , dim=0 )
self.scheduler.set_timesteps(_UpperCAmelCase , device=_UpperCAmelCase )
_A , _A = self.get_timesteps(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_A = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_A , _A = downscale_height_and_width(_UpperCAmelCase , _UpperCAmelCase , self.movq_scale_factor )
_A = self.prepare_latents(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , image_embeds.dtype , _UpperCAmelCase , _UpperCAmelCase )
for i, t in enumerate(self.progress_bar(_UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A = {'image_embeds': image_embeds}
_A = self.unet(
sample=_UpperCAmelCase , timestep=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , added_cond_kwargs=_UpperCAmelCase , return_dict=_UpperCAmelCase , )[0]
if do_classifier_free_guidance:
_A , _A = noise_pred.split(latents.shape[1] , dim=1 )
_A , _A = noise_pred.chunk(2 )
_A , _A = variance_pred.chunk(2 )
_A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_A = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_A , _A = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_A = self.scheduler.step(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase , )[0]
# post-processing
_A = self.movq.decode(_UpperCAmelCase , force_not_quantize=_UpperCAmelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
_A = image * 0.5 + 0.5
_A = image.clamp(0 , 1 )
_A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_A = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCAmelCase )
| 7 |
import heapq as hq
import math
from collections.abc import Iterator
class _snake_case :
    def __init__( self , id_) -> Optional[Any]:
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {} # {vertex:distance}
    def __lt__( self , other) -> Dict:
        return self.key < other.key
def __repr__( self) -> Optional[Any]:
return self.id
    def add_neighbor( self , vertex) -> Optional[Any]:
        self.neighbors.append(vertex)
    def add_edge( self , vertex , weight) -> Tuple:
        self.edges[vertex.id] = weight
def connect(graph , a , b , w):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , w)
    graph[b - 1].add_edge(graph[a - 1] , w)
def prim(graph , root):
    # O(V^2) Prim: repeatedly pull the minimum-key vertex out of the queue
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 , len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def prim_heap(graph , root):
    # heap-based Prim: re-heapify whenever a key decreases
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1 , len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def lowerCamelCase__ ():
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
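# Hedged usage sketch (added for illustration): a weighted triangle, MST from vertex 1.
# g = [_snake_case(i) for i in range(1, 4)]       # three vertices labelled 1..3
# connect(g, 1, 2, 1); connect(g, 2, 3, 2); connect(g, 1, 3, 3)
# print(prim(g, g[0]))                            # two MST edges, e.g. [(2, 1), (3, 2)]
# print(list(prim_heap(g, g[0])))                 # same result via the heap variant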
| 73 | 0 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None , metadata=None):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = 42
lowerCAmelCase = field(default='''toto''' , metadata={'''help''': '''help message'''} )
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = False
lowerCAmelCase = True
lowerCAmelCase = None
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''titi'''
lowerCAmelCase = '''toto'''
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''titi'''
lowerCAmelCase = '''toto'''
lowerCAmelCase = 42
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = "toto"
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = BasicEnum(self.foo)
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = "toto"
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = MixedTypeEnum(self.foo)
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = None
lowerCAmelCase = field(default=a__ , metadata={'''help''': '''help message'''} )
lowerCAmelCase = None
lowerCAmelCase = list_field(default=[] )
lowerCAmelCase = list_field(default=[] )
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = list_field(default=[] )
lowerCAmelCase = list_field(default=[1, 2, 3] )
lowerCAmelCase = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
lowerCAmelCase = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = field()
lowerCAmelCase = field()
lowerCAmelCase = field()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = BasicEnum(self.required_enum)
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = 42
lowerCAmelCase = field()
lowerCAmelCase = None
lowerCAmelCase = field(default='''toto''' , metadata={'''help''': '''help message'''} )
lowerCAmelCase = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
if is_python_no_less_than_3_10:
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = False
lowerCAmelCase = True
lowerCAmelCase = None
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = None
lowerCAmelCase = field(default=a__ , metadata={'''help''': '''help message'''} )
lowerCAmelCase = None
lowerCAmelCase = list_field(default=[] )
lowerCAmelCase = list_field(default=[] )
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
self.assertEqual(len(a._actions) , len(b._actions))
for x, y in zip(a._actions , b._actions):
__A : List[Any] = {k: v for k, v in vars(_UpperCAmelCase).items() if k != 'container'}
__A : Union[str, Any] = {k: v for k, v in vars(_UpperCAmelCase).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , _UpperCAmelCase) and yy.get('choices' , _UpperCAmelCase):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](_UpperCAmelCase) , yy['type'](_UpperCAmelCase))
del xx["type"], yy["type"]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = HfArgumentParser(_UpperCAmelCase)
__A : int = argparse.ArgumentParser()
expected.add_argument('--foo' , type=_UpperCAmelCase , required=_UpperCAmelCase)
expected.add_argument('--bar' , type=_UpperCAmelCase , required=_UpperCAmelCase)
expected.add_argument('--baz' , type=_UpperCAmelCase , required=_UpperCAmelCase)
expected.add_argument('--flag' , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs='?')
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
__A : int = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((__A) ,) : Any = parser.parse_args_into_dataclasses(_UpperCAmelCase , look_for_args_file=_UpperCAmelCase)
self.assertFalse(example.flag)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = HfArgumentParser(_UpperCAmelCase)
__A : List[str] = argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=_UpperCAmelCase)
expected.add_argument('--baz' , default='toto' , type=_UpperCAmelCase , help='help message')
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs='?')
expected.add_argument('--baz' , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs='?')
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=_UpperCAmelCase , dest='baz')
expected.add_argument('--opt' , type=_UpperCAmelCase , default=_UpperCAmelCase)
__A : str = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCAmelCase)
for dataclass_type in dataclass_types:
__A : Tuple = HfArgumentParser(_UpperCAmelCase)
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
__A : str = parser.parse_args([])
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase))
__A : Tuple = parser.parse_args(['--foo', '--no_baz'])
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase))
__A : Optional[Any] = parser.parse_args(['--foo', '--baz'])
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase))
__A : Optional[int] = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'])
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase))
__A : List[Any] = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'])
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = HfArgumentParser(_UpperCAmelCase)
__A : Tuple = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
__A : int = parser.parse_args([])
self.assertEqual(args.foo , 'toto')
__A : Optional[Any] = parser.parse_args_into_dataclasses([])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto)
__A : Union[str, Any] = parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
__A : List[Any] = parser.parse_args_into_dataclasses(['--foo', 'titi'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi)
__A : Dict = parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
__A : Tuple = parser.parse_args_into_dataclasses(['--foo', '42'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
@dataclass
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = "toto"
__A : str = HfArgumentParser(_UpperCAmelCase)
__A : Optional[Any] = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
__A : Dict = parser.parse_args([])
self.assertEqual(args.foo , 'toto')
__A : Optional[int] = parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
__A : Optional[Any] = parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = HfArgumentParser(_UpperCAmelCase)
__A : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=_UpperCAmelCase)
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=_UpperCAmelCase)
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_UpperCAmelCase)
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=_UpperCAmelCase)
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
__A : int = parser.parse_args([])
self.assertEqual(
_UpperCAmelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3]) , )
__A : Optional[int] = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split())
self.assertEqual(_UpperCAmelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7]))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = argparse.ArgumentParser()
expected.add_argument('--foo' , default=_UpperCAmelCase , type=_UpperCAmelCase)
expected.add_argument('--bar' , default=_UpperCAmelCase , type=_UpperCAmelCase , help='help message')
expected.add_argument('--baz' , default=_UpperCAmelCase , type=_UpperCAmelCase)
expected.add_argument('--ces' , nargs='+' , default=[] , type=_UpperCAmelCase)
expected.add_argument('--des' , nargs='+' , default=[] , type=_UpperCAmelCase)
__A : Optional[int] = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCAmelCase)
for dataclass_type in dataclass_types:
__A : Dict = HfArgumentParser(_UpperCAmelCase)
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
__A : Union[str, Any] = parser.parse_args([])
self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , bar=_UpperCAmelCase , baz=_UpperCAmelCase , ces=[] , des=[]))
__A : str = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split())
self.assertEqual(_UpperCAmelCase , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3]))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = HfArgumentParser(_UpperCAmelCase)
__A : Optional[int] = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=_UpperCAmelCase , required=_UpperCAmelCase)
expected.add_argument('--required_str' , type=_UpperCAmelCase , required=_UpperCAmelCase)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=_UpperCAmelCase , )
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = HfArgumentParser(_UpperCAmelCase)
__A : Tuple = argparse.ArgumentParser()
expected.add_argument('--foo' , type=_UpperCAmelCase , required=_UpperCAmelCase)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=_UpperCAmelCase , )
expected.add_argument('--opt' , type=_UpperCAmelCase , default=_UpperCAmelCase)
expected.add_argument('--baz' , default='toto' , type=_UpperCAmelCase , help='help message')
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_UpperCAmelCase)
self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = HfArgumentParser(_UpperCAmelCase)
__A : List[str] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
__A : str = parser.parse_dict(_UpperCAmelCase)[0]
__A : Optional[Any] = BasicExample(**_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = HfArgumentParser(_UpperCAmelCase)
__A : List[Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(_UpperCAmelCase , parser.parse_dict , _UpperCAmelCase , allow_extra_keys=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = HfArgumentParser(_UpperCAmelCase)
__A : Union[str, Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__A : List[Any] = os.path.join(_UpperCAmelCase , 'temp_json')
os.mkdir(_UpperCAmelCase)
with open(temp_local_path + '.json' , 'w+') as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase)
            __A : Union[str, Any] = parser.parse_json_file(Path(temp_local_path + '.json'))[0]
__A : str = BasicExample(**_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = HfArgumentParser(_UpperCAmelCase)
__A : Optional[Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__A : List[str] = os.path.join(_UpperCAmelCase , 'temp_yaml')
os.mkdir(_UpperCAmelCase)
with open(temp_local_path + '.yaml' , 'w+') as f:
yaml.dump(_UpperCAmelCase , _UpperCAmelCase)
__A : str = parser.parse_yaml_file(Path(temp_local_path + '.yaml'))[0]
__A : Optional[Any] = BasicExample(**_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = HfArgumentParser(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase) | 8 |
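# Hedged usage sketch (added for illustration): the API the tests above exercise.
# from dataclasses import dataclass
# from transformers import HfArgumentParser
# @dataclass
# class Args:
#     foo: int
#     bar: float = 3.14
# (args,) = HfArgumentParser(Args).parse_args_into_dataclasses(["--foo", "1"])
# assert args.foo == 1 and args.bar == 3.14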
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mask2former'] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 73 | 0 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate( model_type , generator_name_or_path , question_encoder_name_or_path , dest_dir , config_name_or_path = None , generator_tokenizer_name_or_path = None , question_encoder_tokenizer_name_or_path = None , ) -> Optional[int]:
    if config_name_or_path is None:
        config_name_or_path = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path , generator_name_or_path , config=rag_config )
    rag_model.save_pretrained(dest_dir )
    # Sanity check.
    model_class.from_pretrained(dest_dir )
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/' )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
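# Hedged usage sketch (shell, added for illustration; the model identifiers are
# examples, not requirements):
#   python consolidate_rag_checkpoint.py \
#     --model_type rag_sequence \
#     --generator_name_or_path facebook/bart-large-cnn \
#     --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#     --dest ./rag-sequence-checkpoint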
| 9 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
a_ : Union[str, Any] = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _snake_case ( A__ ):
_lowercase : Optional[Any] = '''decision_transformer'''
_lowercase : str = ['''past_key_values''']
_lowercase : Union[str, Any] = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , a=17 , a=4 , a=128 , a=4096 , a=True , a=1 , a=1024 , a=3 , a=1 , a=None , a="relu" , a=0.1 , a=0.1 , a=0.1 , a=1E-5 , a=0.02 , a=True , a=True , a=5_0256 , a=5_0256 , a=False , a=False , **a , ) -> List[str]:
SCREAMING_SNAKE_CASE = state_dim
SCREAMING_SNAKE_CASE = act_dim
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = max_ep_len
SCREAMING_SNAKE_CASE = action_tanh
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = n_positions
SCREAMING_SNAKE_CASE = n_layer
SCREAMING_SNAKE_CASE = n_head
SCREAMING_SNAKE_CASE = n_inner
SCREAMING_SNAKE_CASE = activation_function
SCREAMING_SNAKE_CASE = resid_pdrop
SCREAMING_SNAKE_CASE = embd_pdrop
SCREAMING_SNAKE_CASE = attn_pdrop
SCREAMING_SNAKE_CASE = layer_norm_epsilon
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scale_attn_weights
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE = bos_token_id
SCREAMING_SNAKE_CASE = eos_token_id
super().__init__(bos_token_id=a , eos_token_id=a , **a)
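# Hedged usage sketch (added for illustration; assumes the transformers package):
# from transformers import DecisionTransformerConfig, DecisionTransformerModel
# config = DecisionTransformerConfig(state_dim=17, act_dim=4)  # hopper-sized dims
# model = DecisionTransformerModel(config)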
| 73 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "swinv2"
UpperCAmelCase = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : List[str] , _A : Optional[Any]=224 , _A : Dict=4 , _A : Dict=3 , _A : List[str]=96 , _A : Any=[2, 2, 6, 2] , _A : Any=[3, 6, 12, 24] , _A : List[Any]=7 , _A : int=4.0 , _A : Optional[Any]=True , _A : Dict=0.0 , _A : Union[str, Any]=0.0 , _A : Any=0.1 , _A : Dict="gelu" , _A : int=False , _A : Optional[Any]=0.02 , _A : Union[str, Any]=1e-5 , _A : str=32 , **_A : str , ):
super().__init__(**_A )
_UpperCamelCase = image_size
_UpperCamelCase = patch_size
_UpperCamelCase = num_channels
_UpperCamelCase = embed_dim
_UpperCamelCase = depths
_UpperCamelCase = len(_A )
_UpperCamelCase = num_heads
_UpperCamelCase = window_size
_UpperCamelCase = mlp_ratio
_UpperCamelCase = qkv_bias
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = drop_path_rate
_UpperCamelCase = hidden_act
_UpperCamelCase = use_absolute_embeddings
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = initializer_range
_UpperCamelCase = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCamelCase = int(embed_dim * 2 ** (len(_A ) - 1) )
_UpperCamelCase = (0, 0, 0, 0)
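# Hedged usage sketch (added for illustration; assumes the transformers package):
# from transformers import Swinv2Config, Swinv2Model
# cfg = Swinv2Config(image_size=256, embed_dim=96, depths=[2, 2, 6, 2])
# model = Swinv2Model(cfg)   # hidden_size is derived as embed_dim * 2**(n_stages - 1)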
| 10 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ : Optional[int] = 16
a_ : Any = 32
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase = 16):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('bert-base-cased')
SCREAMING_SNAKE_CASE = load_dataset('glue' , 'mrpc')
def tokenize_function(_UpperCAmelCase):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column('label' , 'labels')
def collate_fn(_UpperCAmelCase):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE = 8
else:
SCREAMING_SNAKE_CASE = None
return tokenizer.pad(
_UpperCAmelCase , padding='longest' , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors='pt' , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets['train'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase , drop_last=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets['validation'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase , drop_last=(accelerator.mixed_precision == 'fp8') , )
return train_dataloader, eval_dataloader
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
# Initialize accelerator
SCREAMING_SNAKE_CASE = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE = config['lr']
SCREAMING_SNAKE_CASE = int(config['num_epochs'])
SCREAMING_SNAKE_CASE = int(config['seed'])
SCREAMING_SNAKE_CASE = int(config['batch_size'])
SCREAMING_SNAKE_CASE = evaluate.load('glue' , 'mrpc')
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE = batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE = MAX_GPU_BATCH_SIZE
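    # Worked example: with MAX_GPU_BATCH_SIZE = 16 (set above), a requested
    # batch_size of 64 becomes gradient_accumulation_steps = 64 // 16 = 4 at a
    # per-step batch size of 16, so the effective batch size is still 4 * 16 = 64.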
set_seed(_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase)
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_UpperCAmelCase)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE = model.to(accelerator.device)
# Instantiate optimizer
SCREAMING_SNAKE_CASE = AdamW(params=model.parameters() , lr=_UpperCAmelCase)
# Instantiate scheduler
SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup(
optimizer=_UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_UpperCAmelCase) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
    # There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the
    # prepare method.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# Now we train the model
for epoch in range(_UpperCAmelCase):
model.train()
for step, batch in enumerate(_UpperCAmelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
SCREAMING_SNAKE_CASE = outputs.loss
SCREAMING_SNAKE_CASE = loss / gradient_accumulation_steps
accelerator.backward(_UpperCAmelCase)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_UpperCAmelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch['labels']))
metric.add_batch(
predictions=_UpperCAmelCase , references=_UpperCAmelCase , )
SCREAMING_SNAKE_CASE = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _UpperCAmelCase)
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description='Simple example of training script.')
parser.add_argument(
        '--mixed_precision' , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
        'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.')
SCREAMING_SNAKE_CASE = parser.parse_args()
SCREAMING_SNAKE_CASE = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(_UpperCAmelCase , _UpperCAmelCase)
if __name__ == "__main__":
main()
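# Typical invocations (standard `accelerate` CLI; the file name is illustrative):
#   python nlp_example.py                                    # single CPU or GPU
#   accelerate launch nlp_example.py                         # settings from `accelerate config`
#   accelerate launch --mixed_precision fp16 nlp_example.py  # force fp16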
| 73 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : str = 'wav2vec2'
def __init__(self , A=32 , A=768 , A=12 , A=12 , A=3_072 , A="gelu" , A=0.1 , A=0.1 , A=0.1 , A=0.0 , A=0.0 , A=0.1 , A=0.1 , A=0.02 , A=1E-5 , A="group" , A="gelu" , A=(512, 512, 512, 512, 512, 512, 512) , A=(5, 2, 2, 2, 2, 2, 2) , A=(10, 3, 3, 3, 3, 2, 2) , A=False , A=128 , A=16 , A=False , A=True , A=0.05 , A=10 , A=2 , A=0.0 , A=10 , A=0 , A=320 , A=2 , A=0.1 , A=100 , A=256 , A=256 , A=0.1 , A="sum" , A=False , A=False , A=256 , A=(512, 512, 512, 512, 1_500) , A=(5, 3, 3, 1, 1) , A=(1, 2, 3, 1, 1) , A=512 , A=0 , A=1 , A=2 , A=False , A=3 , A=2 , A=3 , A=None , A=None , **A , ) -> int:
"""simple docstring"""
super().__init__(**A , pad_token_id=A , bos_token_id=A , eos_token_id=A )
_a = hidden_size
_a = feat_extract_norm
_a = feat_extract_activation
_a = list(A )
_a = list(A )
_a = list(A )
_a = conv_bias
_a = num_conv_pos_embeddings
_a = num_conv_pos_embedding_groups
_a = len(self.conv_dim )
_a = num_hidden_layers
_a = intermediate_size
_a = hidden_act
_a = num_attention_heads
_a = hidden_dropout
_a = attention_dropout
_a = activation_dropout
_a = feat_proj_dropout
_a = final_dropout
_a = layerdrop
_a = layer_norm_eps
_a = initializer_range
_a = vocab_size
_a = do_stable_layer_norm
_a = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a = apply_spec_augment
_a = mask_time_prob
_a = mask_time_length
_a = mask_time_min_masks
_a = mask_feature_prob
_a = mask_feature_length
_a = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_a = num_codevectors_per_group
_a = num_codevector_groups
_a = contrastive_logits_temperature
_a = feat_quantizer_dropout
_a = num_negatives
_a = codevector_dim
_a = proj_codevector_dim
_a = diversity_loss_weight
# ctc loss
_a = ctc_loss_reduction
_a = ctc_zero_infinity
# adapter
_a = add_adapter
_a = adapter_kernel_size
_a = adapter_stride
_a = num_adapter_layers
_a = output_hidden_size or hidden_size
_a = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_a = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_a = list(A )
_a = list(A )
_a = list(A )
_a = xvector_output_dim
@property
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
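# Quick check of the property above (a minimal sketch): with the default strides,
# functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320, i.e. one output
# frame of the feature extractor covers 320 input samples (20 ms of 16 kHz audio).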
| 11 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a_ : int = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
a_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
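# A minimal sketch of the lazy-import idea used above (not the real _LazyModule,
# which also handles TYPE_CHECKING and __dir__): attribute access triggers the
# actual submodule import, so importing the package itself stays cheap.
import importlib
import types
class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported name back to the submodule that defines it.
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }
    def __getattr__(self, name):
        module = importlib.import_module('.' + self._class_to_module[name], self.__name__)
        return getattr(module, name)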
| 73 | 0 |
lowerCamelCase__ : dict[tuple[int, int, int], int] = {}
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
lowercase__ : Tuple = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
lowercase__ : Union[str, Any] = _calculate(days - 1 , lowercase_ , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
lowercase__ : List[str] = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
lowercase__ : Dict = _calculate(days - 1 , lowercase_ , 0 )
lowercase__ : List[str] = state_late + state_absent + state_ontime
lowercase__ : List[Any] = prizestrings
return prizestrings
def UpperCamelCase ( lowercase_ = 30 ) -> int:
'''simple docstring'''
return _calculate(lowercase_ , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
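# Sanity checks (assuming the two functions above keep their original names,
# `_calculate` and `solution`), per Project Euler 191: over a 4-day period exactly
# 43 of the 3**4 = 81 possible trinary strings are prize strings, and
# solution(30) == 1918080160.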
| 12 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False):
if radian_mode:
return [magnitude * cos(_UpperCAmelCase), magnitude * sin(_UpperCAmelCase)]
return [magnitude * cos(radians(_UpperCAmelCase)), magnitude * sin(radians(_UpperCAmelCase))]
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 10**-1):
SCREAMING_SNAKE_CASE = cross(_UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = sum(_UpperCAmelCase)
return abs(_UpperCAmelCase) < eps
if __name__ == "__main__":
# Test to check if it works
a_ : int = array(
[
polar_force(718.4, 1_80 - 30),
polar_force(879.54, 45),
polar_force(1_00, -90),
]
)
a_ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
a_ : Dict = array(
[
polar_force(30 * 9.81, 15),
polar_force(2_15, 1_80 - 45),
polar_force(2_64, 90 - 30),
]
)
a_ : Any = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
a_ : int = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
a_ : Optional[Any] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
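    # Worked check of the beam problem above: with every force vertical, the moment
    # of each force about the origin is x * F_y (the z-component of location x force),
    # and the moments cancel exactly:
    #   0 * (-2000) + 6 * (-1200) + 10 * 15600 + 12 * (-12400) = -7200 + 156000 - 148800 = 0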
import doctest
doctest.testmod()
| 73 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : Optional[int] = {
"""configuration_blip_2""": [
"""BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Blip2Config""",
"""Blip2QFormerConfig""",
"""Blip2VisionConfig""",
],
"""processing_blip_2""": ["""Blip2Processor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Dict = [
"""BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Blip2Model""",
"""Blip2QFormerModel""",
"""Blip2PreTrainedModel""",
"""Blip2ForConditionalGeneration""",
"""Blip2VisionModel""",
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
A__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 13 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Optional[int] = logging.get_logger(__name__)
a_ : int = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class _snake_case ( A__ ):
_lowercase : Dict = '''cvt'''
def __init__( self , a=3 , a=[7, 3, 3] , a=[4, 2, 2] , a=[2, 1, 1] , a=[64, 192, 384] , a=[1, 3, 6] , a=[1, 2, 10] , a=[4.0, 4.0, 4.0] , a=[0.0, 0.0, 0.0] , a=[0.0, 0.0, 0.0] , a=[0.0, 0.0, 0.1] , a=[True, True, True] , a=[False, False, True] , a=["dw_bn", "dw_bn", "dw_bn"] , a=[3, 3, 3] , a=[1, 1, 1] , a=[2, 2, 2] , a=[1, 1, 1] , a=[1, 1, 1] , a=0.02 , a=1E-12 , **a , ) -> List[Any]:
super().__init__(**a)
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_sizes
SCREAMING_SNAKE_CASE = patch_stride
SCREAMING_SNAKE_CASE = patch_padding
SCREAMING_SNAKE_CASE = embed_dim
SCREAMING_SNAKE_CASE = num_heads
SCREAMING_SNAKE_CASE = depth
SCREAMING_SNAKE_CASE = mlp_ratio
SCREAMING_SNAKE_CASE = attention_drop_rate
SCREAMING_SNAKE_CASE = drop_rate
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = qkv_bias
SCREAMING_SNAKE_CASE = cls_token
SCREAMING_SNAKE_CASE = qkv_projection_method
SCREAMING_SNAKE_CASE = kernel_qkv
SCREAMING_SNAKE_CASE = padding_kv
SCREAMING_SNAKE_CASE = stride_kv
SCREAMING_SNAKE_CASE = padding_q
SCREAMING_SNAKE_CASE = stride_q
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
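# Note on the layout above (assuming the original `CvtConfig` name): CvT is a
# three-stage architecture, so each per-stage argument is a 3-element list, e.g.
# depth=[1, 2, 10], num_heads=[1, 3, 6] and embed_dim=[64, 192, 384] give stages
# of 1, 2 and 10 transformer blocks at widths 64, 192 and 384 (the CvT-13 defaults).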
| 73 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
a__ = logging.get_logger(__name__)
a__ = {'''vocab_file''': '''spiece.model'''}
a__ = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
a__ = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
a__ = 0
a__ = 1
a__ = 2
a__ = 3
a__ = 4
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = VOCAB_FILES_NAMES
UpperCAmelCase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[Any] = "left"
def __init__( self , _a , _a=False , _a=True , _a=False , _a="<s>" , _a="</s>" , _a="<unk>" , _a="<sep>" , _a="<pad>" , _a="<cls>" , _a="<mask>" , _a=["<eop>", "<eod>"] , _a = None , **_a , ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
_a : Optional[int] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
_a : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
_a : List[Any] = 3
_a : List[str] = do_lower_case
_a : int = remove_space
_a : Optional[Any] = keep_accents
_a : List[Any] = vocab_file
_a : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def __lowercase ( self ) -> int:
return len(self.sp_model )
def __lowercase ( self ) -> List[Any]:
_a : str = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Any:
_a : Any = self.__dict__.copy()
_a : Optional[Any] = None
return state
def __setstate__( self , _a ) -> Optional[Any]:
_a : Tuple = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_a : Any = {}
_a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowercase ( self , _a ) -> int:
if self.remove_space:
_a : Union[str, Any] = ''' '''.join(inputs.strip().split() )
else:
_a : Tuple = inputs
_a : Dict = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
_a : Tuple = unicodedata.normalize('''NFKD''' , _a )
_a : Tuple = ''''''.join([c for c in outputs if not unicodedata.combining(_a )] )
if self.do_lower_case:
_a : int = outputs.lower()
return outputs
def __lowercase ( self , _a ) -> List[str]:
_a : int = self.preprocess_text(_a )
_a : int = self.sp_model.encode(_a , out_type=_a )
_a : List[str] = []
for piece in pieces:
if len(_a ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
_a : Optional[int] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_a : Tuple = cur_pieces[1:]
else:
_a : List[Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_a )
else:
new_pieces.append(_a )
return new_pieces
def __lowercase ( self , _a ) -> Union[str, Any]:
return self.sp_model.PieceToId(_a )
def __lowercase ( self , _a ) -> Optional[Any]:
return self.sp_model.IdToPiece(_a )
def __lowercase ( self , _a ) -> Any:
_a : Optional[Any] = ''''''.join(_a ).replace(_a , ''' ''' ).strip()
return out_string
def __lowercase ( self , _a , _a = False , _a = None , _a = True , **_a , ) -> str:
_a : int = kwargs.pop('''use_source_tokenizer''' , _a )
_a : str = self.convert_ids_to_tokens(_a , skip_special_tokens=_a )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_a : int = []
_a : str = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
_a : str = []
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_a : Any = ''''''.join(_a )
_a : List[str] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_a : str = self.clean_up_tokenization(_a )
return clean_text
else:
return text
def __lowercase ( self , _a , _a = None ) -> List[int]:
_a : Optional[Any] = [self.sep_token_id]
_a : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowercase ( self , _a , _a = None , _a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is not None:
return ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1, 1]
return ([0] * len(_a )) + [1, 1]
def __lowercase ( self , _a , _a = None ) -> List[int]:
_a : Optional[int] = [self.sep_token_id]
_a : Optional[int] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowercase ( self , _a , _a = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : Optional[int] = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , '''wb''' ) as fi:
_a : Any = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
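# A standalone sketch of the special-token layout implemented above (hypothetical
# ids sep=4, cls=3; unlike BERT, XLNet puts the special tokens at the end):
def _xlnet_layout(a, b=None, sep=4, cls=3):
    return a + [sep] + (b + [sep] if b is not None else []) + [cls]
assert _xlnet_layout([10, 11]) == [10, 11, 4, 3]  # A </s> <cls>
assert _xlnet_layout([10, 11], [20]) == [10, 11, 4, 20, 4, 3]  # A </s> B </s> <cls>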
| 14 |
def lowerCamelCase__ (_UpperCAmelCase = 10 , _UpperCAmelCase = 1000 , _UpperCAmelCase = True):
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase)
and isinstance(_UpperCAmelCase , _UpperCAmelCase)
and isinstance(_UpperCAmelCase , _UpperCAmelCase)
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
        raise ValueError('Invalid values for min_val or max_val (min_val must not exceed max_val)')
return min_val if option else max_val
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
return int((number_a + number_a) / 2)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase) and isinstance(_UpperCAmelCase , _UpperCAmelCase) and isinstance(_UpperCAmelCase , _UpperCAmelCase)
), 'argument values must be type of "int"'
if lower > higher:
        raise ValueError('argument values must satisfy lower < higher')
if not lower < to_guess < higher:
        raise ValueError(
            'guess value must be within the range of the lower and higher values')
def answer(_UpperCAmelCase) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('started...')
SCREAMING_SNAKE_CASE = lower
SCREAMING_SNAKE_CASE = higher
SCREAMING_SNAKE_CASE = []
while True:
SCREAMING_SNAKE_CASE = get_avg(_UpperCAmelCase , _UpperCAmelCase)
last_numbers.append(_UpperCAmelCase)
if answer(_UpperCAmelCase) == "low":
SCREAMING_SNAKE_CASE = number
elif answer(_UpperCAmelCase) == "high":
SCREAMING_SNAKE_CASE = number
else:
break
print(F'''guess the number : {last_numbers[-1]}''')
print(F'''details : {last_numbers!s}''')
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = int(input('Enter lower value : ').strip())
SCREAMING_SNAKE_CASE = int(input('Enter high value : ').strip())
SCREAMING_SNAKE_CASE = int(input('Enter value to guess : ').strip())
guess_the_number(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
if __name__ == "__main__":
main()
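# Example trace of the bisection above for lower=0, higher=100, to_guess=42:
# the midpoints visited are 50, 25, 37, 43, 40, 41, 42 (seven guesses,
# i.e. O(log(higher - lower)) steps).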
| 73 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A : List[str] = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
A : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 15 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class _snake_case :
def __init__( self , a , a=13 , a=7 , a=True , a=True , a=False , a=True , a=99 , a=32 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length])
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices)
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , use_stable_embedding=a , )
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a) -> Any:
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a)
SCREAMING_SNAKE_CASE = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> str:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , )
SCREAMING_SNAKE_CASE = model(a , attention_mask=a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> int:
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> str:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=a)
model.to(a)
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids accordingly
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size)
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append the new tokens to input_ids and the attention mask
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1)
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1)
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1]).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3))
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        (
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( A__ , A__ , A__ , unittest.TestCase ):
_lowercase : List[Any] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
_lowercase : str = (OpenLlamaForCausalLM,) if is_torch_available() else ()
_lowercase : List[str] = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : List[str] = False
_lowercase : Optional[int] = False
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self)
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=a , hidden_size=37)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a)
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'single_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a)
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a)
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
pass
@parameterized.expand([('linear',), ('dynamic',)])
def SCREAMING_SNAKE_CASE__ ( self , a) -> Dict:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = ids_tensor([1, 10] , config.vocab_size)
SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(42) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = OpenLlamaModel(a)
original_model.to(a)
original_model.eval()
SCREAMING_SNAKE_CASE = original_model(a).last_hidden_state
SCREAMING_SNAKE_CASE = original_model(a).last_hidden_state
set_seed(42) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE = OpenLlamaModel(a)
scaled_model.to(a)
scaled_model.eval()
SCREAMING_SNAKE_CASE = scaled_model(a).last_hidden_state
SCREAMING_SNAKE_CASE = scaled_model(a).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1E-5))
else:
self.assertFalse(torch.allclose(a , a , atol=1E-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1E-5))
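# Note on the last test (assuming the usual `rope_scaling` config key): a dict such
# as {'type': 'linear', 'factor': 10.0} stretches the rotary position embeddings so
# inputs up to ~10x max_position_embeddings fit; 'dynamic' scaling only changes the
# embeddings once an input exceeds the original limit, which is why the short-input
# outputs still match in that branch.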
| 73 | 0 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__A : Optional[Any] = datasets.utils.logging.get_logger(__name__)
__A : Any = ['names', 'prefix']
__A : Tuple = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
__A : Dict = ['encoding_errors', 'on_bad_lines']
__A : str = ['date_format']
@dataclass
class _SCREAMING_SNAKE_CASE ( datasets.BuilderConfig ):
'''simple docstring'''
lowerCamelCase__ = ","
lowerCamelCase__ = None
lowerCamelCase__ = "infer"
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = True
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = False
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = True
lowerCamelCase__ = None
lowerCamelCase__ = "."
lowerCamelCase__ = None
lowerCamelCase__ = '"'
lowerCamelCase__ = 0
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = 0
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = None
lowerCamelCase__ = 1_0_0_0_0
lowerCamelCase__ = None
lowerCamelCase__ = "strict"
lowerCamelCase__ = "error"
lowerCamelCase__ = None
def _snake_case ( self : Tuple ):
if self.delimiter is not None:
SCREAMING_SNAKE_CASE = self.delimiter
if self.column_names is not None:
SCREAMING_SNAKE_CASE = self.column_names
@property
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , __lowerCamelCase ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
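# A minimal standalone sketch of the kwarg-pruning pattern above: keys that still
# hold their dataclass defaults are dropped, so pd.read_csv only ever receives
# explicit overrides (never deprecated or unsupported defaults).
from dataclasses import dataclass, fields
from typing import Optional
@dataclass
class _SketchDefaults:
    sep: str = ','
    prefix: Optional[str] = None
def _prune(kwargs):
    defaults = {f.name: getattr(_SketchDefaults(), f.name) for f in fields(_SketchDefaults)}
    return {k: v for k, v in kwargs.items() if defaults.get(k, object()) != v}
assert _prune({'sep': ',', 'prefix': 'col_'}) == {'prefix': 'col_'}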
class _SCREAMING_SNAKE_CASE ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
lowerCamelCase__ = CsvConfig
def _snake_case ( self : int ):
return datasets.DatasetInfo(features=self.config.features )
def _snake_case ( self : Any , __lowerCamelCase : str ):
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
SCREAMING_SNAKE_CASE = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__lowerCamelCase , (str, list, tuple) ):
SCREAMING_SNAKE_CASE = data_files
if isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = [files]
SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
SCREAMING_SNAKE_CASE = []
for split_name, files in data_files.items():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = [files]
SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__lowerCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=__lowerCamelCase , gen_kwargs={"files": files} ) )
return splits
def _snake_case ( self : List[Any] , __lowerCamelCase : pa.Table ):
if self.config.features is not None:
SCREAMING_SNAKE_CASE = self.config.features.arrow_schema
if all(not require_storage_cast(__lowerCamelCase ) for feature in self.config.features.values() ):
# cheaper cast
SCREAMING_SNAKE_CASE = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=__lowerCamelCase )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
SCREAMING_SNAKE_CASE = table_cast(__lowerCamelCase , __lowerCamelCase )
return pa_table
def _snake_case ( self : Optional[int] , __lowerCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
SCREAMING_SNAKE_CASE = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(__lowerCamelCase ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(__lowerCamelCase ) ):
SCREAMING_SNAKE_CASE = pd.read_csv(__lowerCamelCase , iterator=__lowerCamelCase , dtype=__lowerCamelCase , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = pa.Table.from_pandas(__lowerCamelCase )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__lowerCamelCase )
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(__lowerCamelCase )}: {e}" )
                raise
| 16 |
from __future__ import annotations
a_ : str = []
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
for i in range(len(_UpperCAmelCase)):
if board[row][i] == 1:
return False
for i in range(len(_UpperCAmelCase)):
if board[i][column] == 1:
return False
for i, j in zip(range(_UpperCAmelCase , -1 , -1) , range(_UpperCAmelCase , -1 , -1)):
if board[i][j] == 1:
return False
for i, j in zip(range(_UpperCAmelCase , -1 , -1) , range(_UpperCAmelCase , len(_UpperCAmelCase))):
if board[i][j] == 1:
return False
return True
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
if row >= len(_UpperCAmelCase):
solution.append(_UpperCAmelCase)
printboard(_UpperCAmelCase)
print()
return True
for i in range(len(_UpperCAmelCase)):
if is_safe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = 1
solve(_UpperCAmelCase , row + 1)
SCREAMING_SNAKE_CASE = 0
return False
def lowerCamelCase__ (_UpperCAmelCase):
for i in range(len(_UpperCAmelCase)):
for j in range(len(_UpperCAmelCase)):
if board[i][j] == 1:
print('Q' , end=' ')
else:
print('.' , end=' ')
print()
# n=int(input("The no. of queens"))
a_ : Tuple = 8
a_ : int = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions is:', len(solution))
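# Known solution counts for the backtracking above: n = 4 gives 2 placements,
# n = 6 gives 4, and n = 8 (as configured here) gives 92.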
| 73 | 0 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class lowerCamelCase_ ( _lowercase ):
_lowercase : Dict = '''M-CLIP'''
def __init__( self : List[str] , __A : str=1024 , __A : Optional[Any]=768 , **__A : Any ):
__A : List[str] = transformerDimSize
__A : int = imageDimSize
super().__init__(**__A )
class lowerCamelCase_ ( _lowercase ):
_lowercase : List[str] = MCLIPConfig
def __init__( self : Union[str, Any] , __A : Dict , *__A : Any , **__A : Union[str, Any] ):
super().__init__(__A , *__A , **__A )
__A : Any = XLMRobertaModel(__A )
__A : Tuple = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def lowerCAmelCase_ ( self : Dict , __A : Dict , __A : int ):
__A : Dict = self.transformer(input_ids=__A , attention_mask=__A )[0]
__A : Optional[int] = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(__A ), embs
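# A standalone sketch of the masked mean pooling in forward() above: padded
# positions are zeroed before averaging so they do not dilute the sentence embedding.
import torch
embs = torch.ones(1, 3, 4)               # (batch, seq_len, dim), all ones for clarity
mask = torch.tensor([[1.0, 1.0, 0.0]])   # last position is padding
pooled = (embs * mask.unsqueeze(2)).sum(dim=1) / mask.sum(dim=1)[:, None]
assert torch.allclose(pooled, torch.ones(1, 4))  # mean over the 2 real tokens only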
| 17 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _snake_case ( A__ , A__ , unittest.TestCase ):
_lowercase : List[Any] = StableDiffusionDiffEditPipeline
_lowercase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
_lowercase : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
_lowercase : List[str] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowercase : List[str] = frozenset([] )
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , )
SCREAMING_SNAKE_CASE = DDIMInverseScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_zero=a , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
SCREAMING_SNAKE_CASE = CLIPTextModel(a)
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> List[Any]:
SCREAMING_SNAKE_CASE = floats_tensor((1, 16, 16) , rng=random.Random(a)).to(a)
SCREAMING_SNAKE_CASE = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a)).to(a)
if str(a).startswith('mps'):
SCREAMING_SNAKE_CASE = torch.manual_seed(a)
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
SCREAMING_SNAKE_CASE = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> List[Any]:
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(a)).to(a)
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1)[0]
SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(a)).convert('RGB')
if str(a).startswith('mps'):
SCREAMING_SNAKE_CASE = torch.manual_seed(a)
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
SCREAMING_SNAKE_CASE = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> Optional[int]:
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(a)).to(a)
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1)[0]
SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(a)).convert('RGB')
if str(a).startswith('mps'):
SCREAMING_SNAKE_CASE = torch.manual_seed(a)
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
SCREAMING_SNAKE_CASE = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
if not hasattr(self.pipeline_class , '_optional_components'):
return
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(a , a , a)
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a)
SCREAMING_SNAKE_CASE = pipe(**a)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a)
SCREAMING_SNAKE_CASE = self.pipeline_class.from_pretrained(a)
pipe_loaded.to(a)
pipe_loaded.set_progress_bar_config(disable=a)
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(a , a) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a)
SCREAMING_SNAKE_CASE = pipe_loaded(**a)[0]
SCREAMING_SNAKE_CASE = np.abs(output - output_loaded).max()
self.assertLess(a , 1E-4)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = 'cpu'
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = self.get_dummy_mask_inputs(a)
SCREAMING_SNAKE_CASE = pipe.generate_mask(**a)
SCREAMING_SNAKE_CASE = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16))
SCREAMING_SNAKE_CASE = np.array([0] * 9)
SCREAMING_SNAKE_CASE = np.abs(mask_slice.flatten() - expected_slice).max()
self.assertLessEqual(a , 1E-3)
self.assertEqual(mask[0, -3, -4] , 0)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = 'cpu'
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(a)
SCREAMING_SNAKE_CASE = pipe.invert(**a).images
SCREAMING_SNAKE_CASE = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
SCREAMING_SNAKE_CASE = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(a , 1E-3)
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=5E-3)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = 'cpu'
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler(**a)
SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler(**a)
SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(a)
SCREAMING_SNAKE_CASE = pipe.invert(**a).images
SCREAMING_SNAKE_CASE = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
SCREAMING_SNAKE_CASE = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(a , 1E-3)
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls) -> List[Any]:
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png')
SCREAMING_SNAKE_CASE = raw_image.convert('RGB').resize((768, 768))
SCREAMING_SNAKE_CASE = raw_image
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = torch.manual_seed(0)
SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=a , torch_dtype=torch.floataa)
SCREAMING_SNAKE_CASE = DDIMScheduler.from_config(pipe.scheduler.config)
SCREAMING_SNAKE_CASE = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = 'a bowl of fruit'
SCREAMING_SNAKE_CASE = 'a bowl of pears'
SCREAMING_SNAKE_CASE = pipe.generate_mask(
image=self.raw_image , source_prompt=a , target_prompt=a , generator=a , )
SCREAMING_SNAKE_CASE = pipe.invert(
prompt=a , image=self.raw_image , inpaint_strength=0.7 , generator=a).latents
SCREAMING_SNAKE_CASE = pipe(
prompt=a , mask_image=a , image_latents=a , generator=a , negative_prompt=a , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
SCREAMING_SNAKE_CASE = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png').resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = torch.manual_seed(0)
SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=a , torch_dtype=torch.floataa)
SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = 'a bowl of fruit'
SCREAMING_SNAKE_CASE = 'a bowl of pears'
SCREAMING_SNAKE_CASE = pipe.generate_mask(
image=self.raw_image , source_prompt=a , target_prompt=a , generator=a , )
SCREAMING_SNAKE_CASE = pipe.invert(
prompt=a , image=self.raw_image , inpaint_strength=0.7 , generator=a , num_inference_steps=25 , ).latents
SCREAMING_SNAKE_CASE = pipe(
prompt=a , mask_image=a , image_latents=a , generator=a , negative_prompt=a , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
SCREAMING_SNAKE_CASE = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png').resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
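# The slow tests above exercise DiffEdit's three-stage API (real methods on
# StableDiffusionDiffEditPipeline):
#   1. generate_mask(...)  contrasts source/target prompts to infer an edit mask
#   2. invert(...)         runs DDIM/DPM inversion of the input image into latents
#   3. pipe(...)           denoises from the inverted latents, editing only inside the mask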
| 73 | 0 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Any = "mctct"
def __init__( self , _lowerCAmelCase=8065 , _lowerCAmelCase=1536 , _lowerCAmelCase=36 , _lowerCAmelCase=6144 , _lowerCAmelCase=4 , _lowerCAmelCase=384 , _lowerCAmelCase=920 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.3 , _lowerCAmelCase="relu" , _lowerCAmelCase=0.02 , _lowerCAmelCase=0.3 , _lowerCAmelCase=0.3 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=0.3 , _lowerCAmelCase=1 , _lowerCAmelCase=(7,) , _lowerCAmelCase=(3,) , _lowerCAmelCase=80 , _lowerCAmelCase=1 , _lowerCAmelCase=None , _lowerCAmelCase="sum" , _lowerCAmelCase=False , **_lowerCAmelCase , ) -> int:
super().__init__(**_lowerCAmelCase , pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = attention_head_dim
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = layerdrop
_lowerCAmelCase = hidden_act
_lowerCAmelCase = initializer_range
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = pad_token_id
_lowerCAmelCase = bos_token_id
_lowerCAmelCase = eos_token_id
_lowerCAmelCase = conv_glu_dim
_lowerCAmelCase = conv_dropout
_lowerCAmelCase = num_conv_layers
_lowerCAmelCase = input_feat_per_channel
_lowerCAmelCase = input_channels
_lowerCAmelCase = conv_channels
_lowerCAmelCase = ctc_loss_reduction
_lowerCAmelCase = ctc_zero_infinity
        # prevents the config test from failing when exporting to JSON
_lowerCAmelCase = list(_lowerCAmelCase )
_lowerCAmelCase = list(_lowerCAmelCase )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
f'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
| 18 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : List[str] = logging.get_logger(__name__)
a_ : Any = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _snake_case ( A__ ):
_lowercase : Optional[int] = '''unispeech'''
def __init__( self , a=32 , a=768 , a=12 , a=12 , a=3072 , a="gelu" , a=0.1 , a=0.1 , a=0.1 , a=0.0 , a=0.0 , a=0.1 , a=0.1 , a=0.02 , a=1E-5 , a="group" , a="gelu" , a=(512, 512, 512, 512, 512, 512, 512) , a=(5, 2, 2, 2, 2, 2, 2) , a=(10, 3, 3, 3, 3, 2, 2) , a=False , a=128 , a=16 , a=False , a=True , a=0.05 , a=10 , a=2 , a=0.0 , a=10 , a=0 , a=320 , a=2 , a=0.1 , a=100 , a=256 , a=256 , a=0.1 , a="mean" , a=False , a=False , a=256 , a=80 , a=0 , a=1 , a=2 , a=0.5 , **a , ) -> Optional[int]:
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a)
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = feat_extract_norm
SCREAMING_SNAKE_CASE = feat_extract_activation
SCREAMING_SNAKE_CASE = list(a)
SCREAMING_SNAKE_CASE = list(a)
SCREAMING_SNAKE_CASE = list(a)
SCREAMING_SNAKE_CASE = conv_bias
SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE = len(self.conv_dim)
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = activation_dropout
SCREAMING_SNAKE_CASE = feat_proj_dropout
SCREAMING_SNAKE_CASE = final_dropout
SCREAMING_SNAKE_CASE = layerdrop
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_ctc_classes
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = do_stable_layer_norm
SCREAMING_SNAKE_CASE = use_weighted_layer_sum
SCREAMING_SNAKE_CASE = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE = apply_spec_augment
SCREAMING_SNAKE_CASE = mask_time_prob
SCREAMING_SNAKE_CASE = mask_time_length
SCREAMING_SNAKE_CASE = mask_time_min_masks
SCREAMING_SNAKE_CASE = mask_feature_prob
SCREAMING_SNAKE_CASE = mask_feature_length
SCREAMING_SNAKE_CASE = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE = num_codevectors_per_group
SCREAMING_SNAKE_CASE = num_codevector_groups
SCREAMING_SNAKE_CASE = contrastive_logits_temperature
SCREAMING_SNAKE_CASE = feat_quantizer_dropout
SCREAMING_SNAKE_CASE = num_negatives
SCREAMING_SNAKE_CASE = codevector_dim
SCREAMING_SNAKE_CASE = proj_codevector_dim
SCREAMING_SNAKE_CASE = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE = ctc_loss_reduction
SCREAMING_SNAKE_CASE = ctc_zero_infinity
# pretraining loss
SCREAMING_SNAKE_CASE = replace_prob
@property
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
return functools.reduce(operator.mul , self.conv_stride , 1)
| 73 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'wavlm'
def __init__( self , __a=32 , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=0.1 , __a=0.0 , __a=0.1 , __a=0.1 , __a=0.02 , __a=1e-5 , __a="group" , __a="gelu" , __a=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __a=(5, 2, 2, 2, 2, 2, 2) , __a=(10, 3, 3, 3, 3, 2, 2) , __a=False , __a=1_28 , __a=16 , __a=3_20 , __a=8_00 , __a=False , __a=True , __a=0.05 , __a=10 , __a=2 , __a=0.0 , __a=10 , __a=3_20 , __a=2 , __a=0.1 , __a=1_00 , __a=2_56 , __a=2_56 , __a=0.1 , __a="mean" , __a=False , __a=False , __a=2_56 , __a=(5_12, 5_12, 5_12, 5_12, 15_00) , __a=(5, 3, 3, 1, 1) , __a=(1, 2, 3, 1, 1) , __a=5_12 , __a=80 , __a=0 , __a=1 , __a=2 , __a=False , __a=3 , __a=2 , __a=3 , __a=None , **__a , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a)
_UpperCamelCase = hidden_size
_UpperCamelCase = feat_extract_norm
_UpperCamelCase = feat_extract_activation
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = conv_bias
_UpperCamelCase = num_buckets
_UpperCamelCase = max_bucket_distance
_UpperCamelCase = num_conv_pos_embeddings
_UpperCamelCase = num_conv_pos_embedding_groups
_UpperCamelCase = len(self.conv_dim)
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = feat_proj_dropout
_UpperCamelCase = final_dropout
_UpperCamelCase = layerdrop
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = initializer_range
_UpperCamelCase = num_ctc_classes
_UpperCamelCase = vocab_size
_UpperCamelCase = do_stable_layer_norm
_UpperCamelCase = use_weighted_layer_sum
_UpperCamelCase = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCamelCase = apply_spec_augment
_UpperCamelCase = mask_time_prob
_UpperCamelCase = mask_time_length
_UpperCamelCase = mask_time_min_masks
_UpperCamelCase = mask_feature_prob
_UpperCamelCase = mask_feature_length
# parameters for pretraining with codevector quantized representations
_UpperCamelCase = num_codevectors_per_group
_UpperCamelCase = num_codevector_groups
_UpperCamelCase = contrastive_logits_temperature
_UpperCamelCase = num_negatives
_UpperCamelCase = codevector_dim
_UpperCamelCase = proj_codevector_dim
_UpperCamelCase = diversity_loss_weight
# ctc loss
_UpperCamelCase = ctc_loss_reduction
_UpperCamelCase = ctc_zero_infinity
# adapter
_UpperCamelCase = add_adapter
_UpperCamelCase = adapter_kernel_size
_UpperCamelCase = adapter_stride
_UpperCamelCase = num_adapter_layers
_UpperCamelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_UpperCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = xvector_output_dim
@property
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
| 19 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
a_ : Optional[Any] = re.compile(R'\b(a|an|the)\b', re.UNICODE)
a_ : List[str] = None
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.')
parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.')
parser.add_argument(
'--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).')
parser.add_argument(
'--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.')
parser.add_argument(
'--na-prob-thresh' , '-t' , type=_UpperCAmelCase , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , )
parser.add_argument(
'--out-image-dir' , '-p' , metavar='out_images' , default=_UpperCAmelCase , help='Save precision-recall curves to directory.')
parser.add_argument('--verbose' , '-v' , action='store_true')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
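# Map each question id to whether it has at least one non-empty gold answer.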
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
SCREAMING_SNAKE_CASE = bool(qa['answers']['text'])
return qid_to_has_ans
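# SQuAD answer normalization: lowercase, strip punctuation, articles, and extra whitespace.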
def lowerCamelCase__ (_UpperCAmelCase):
def remove_articles(_UpperCAmelCase):
return ARTICLES_REGEX.sub(' ' , _UpperCAmelCase)
def white_space_fix(_UpperCAmelCase):
return " ".join(text.split())
def remove_punc(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(_UpperCAmelCase):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_UpperCAmelCase))))
def lowerCamelCase__ (_UpperCAmelCase):
if not s:
return []
return normalize_answer(_UpperCAmelCase).split()
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
return int(normalize_answer(_UpperCAmelCase) == normalize_answer(_UpperCAmelCase))
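# Token-level F1 overlap between a gold answer and a prediction.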
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = get_tokens(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = get_tokens(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = collections.Counter(_UpperCAmelCase) & collections.Counter(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = sum(common.values())
if len(_UpperCAmelCase) == 0 or len(_UpperCAmelCase) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
SCREAMING_SNAKE_CASE = 1.0 * num_same / len(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = 1.0 * num_same / len(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = (2 * precision * recall) / (precision + recall)
return fa
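# Score every prediction against all gold answers, keeping the best exact-match and F1 per question.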
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
SCREAMING_SNAKE_CASE = qa['id']
SCREAMING_SNAKE_CASE = [t for t in qa['answers']['text'] if normalize_answer(_UpperCAmelCase)]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
SCREAMING_SNAKE_CASE = ['']
if qid not in preds:
print(F'''Missing prediction for {qid}''')
continue
SCREAMING_SNAKE_CASE = preds[qid]
# Take max over all gold answers
SCREAMING_SNAKE_CASE = max(compute_exact(_UpperCAmelCase , _UpperCAmelCase) for a in gold_answers)
SCREAMING_SNAKE_CASE = max(compute_fa(_UpperCAmelCase , _UpperCAmelCase) for a in gold_answers)
return exact_scores, fa_scores
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = {}
for qid, s in scores.items():
SCREAMING_SNAKE_CASE = na_probs[qid] > na_prob_thresh
if pred_na:
SCREAMING_SNAKE_CASE = float(not qid_to_has_ans[qid])
else:
SCREAMING_SNAKE_CASE = s
return new_scores
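# Aggregate per-question scores into overall exact-match and F1 percentages.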
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None):
if not qid_list:
SCREAMING_SNAKE_CASE = len(_UpperCAmelCase)
return collections.OrderedDict(
[
('exact', 1_00.0 * sum(exact_scores.values()) / total),
('f1', 1_00.0 * sum(fa_scores.values()) / total),
('total', total),
])
else:
SCREAMING_SNAKE_CASE = len(_UpperCAmelCase)
return collections.OrderedDict(
[
('exact', 1_00.0 * sum(exact_scores[k] for k in qid_list) / total),
('f1', 1_00.0 * sum(fa_scores[k] for k in qid_list) / total),
('total', total),
])
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
for k in new_eval:
SCREAMING_SNAKE_CASE = new_eval[k]
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
plt.step(_UpperCAmelCase , _UpperCAmelCase , color='b' , alpha=0.2 , where='post')
plt.fill_between(_UpperCAmelCase , _UpperCAmelCase , step='post' , alpha=0.2 , color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.title(_UpperCAmelCase)
plt.savefig(_UpperCAmelCase)
plt.clf()
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None):
SCREAMING_SNAKE_CASE = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase: na_probs[k])
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1.0
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = [1.0]
SCREAMING_SNAKE_CASE = [0.0]
SCREAMING_SNAKE_CASE = 0.0
for i, qid in enumerate(_UpperCAmelCase):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
SCREAMING_SNAKE_CASE = true_pos / float(i + 1)
SCREAMING_SNAKE_CASE = true_pos / float(_UpperCAmelCase)
if i == len(_UpperCAmelCase) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(_UpperCAmelCase)
recalls.append(_UpperCAmelCase)
if out_image:
plot_pr_curve(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
return {"ap": 1_00.0 * avg_prec}
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
if out_image_dir and not os.path.exists(_UpperCAmelCase):
os.makedirs(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = sum(1 for v in qid_to_has_ans.values() if v)
if num_true_pos == 0:
return
SCREAMING_SNAKE_CASE = make_precision_recall_eval(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_exact.png') , title='Precision-Recall curve for Exact Match score' , )
SCREAMING_SNAKE_CASE = make_precision_recall_eval(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_f1.png') , title='Precision-Recall curve for F1 score' , )
SCREAMING_SNAKE_CASE = {k: float(_UpperCAmelCase) for k, v in qid_to_has_ans.items()}
SCREAMING_SNAKE_CASE = make_precision_recall_eval(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_oracle.png') , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_exact')
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_f1')
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_oracle')
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
if not qid_list:
return
SCREAMING_SNAKE_CASE = [na_probs[k] for k in qid_list]
SCREAMING_SNAKE_CASE = np.ones_like(_UpperCAmelCase) / float(len(_UpperCAmelCase))
plt.hist(_UpperCAmelCase , weights=_UpperCAmelCase , bins=20 , range=(0.0, 1.0))
plt.xlabel('Model probability of no-answer')
plt.ylabel('Proportion of dataset')
plt.title(F'''Histogram of no-answer probability: {name}''')
plt.savefig(os.path.join(_UpperCAmelCase , F'''na_prob_hist_{name}.png'''))
plt.clf()
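# Sweep no-answer thresholds in order of the model's no-answer probability and keep the best achievable score.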
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
SCREAMING_SNAKE_CASE = num_no_ans
SCREAMING_SNAKE_CASE = cur_score
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase: na_probs[k])
for i, qid in enumerate(_UpperCAmelCase):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
SCREAMING_SNAKE_CASE = scores[qid]
else:
if preds[qid]:
SCREAMING_SNAKE_CASE = -1
else:
SCREAMING_SNAKE_CASE = 0
cur_score += diff
if cur_score > best_score:
SCREAMING_SNAKE_CASE = cur_score
SCREAMING_SNAKE_CASE = na_probs[qid]
return 1_00.0 * best_score / len(_UpperCAmelCase), best_thresh
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = find_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = find_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = best_exact
SCREAMING_SNAKE_CASE = exact_thresh
SCREAMING_SNAKE_CASE = best_fa
SCREAMING_SNAKE_CASE = fa_thresh
def lowerCamelCase__ ():
with open(OPTS.data_file) as f:
SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = dataset_json['data']
with open(OPTS.pred_file) as f:
SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase)
if OPTS.na_prob_file:
with open(OPTS.na_prob_file) as f:
SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase)
else:
SCREAMING_SNAKE_CASE = {k: 0.0 for k in preds}
SCREAMING_SNAKE_CASE = make_qid_to_has_ans(_UpperCAmelCase) # maps qid to True/False
SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if v]
SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if not v]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_raw_scores(_UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = apply_no_ans_threshold(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.na_prob_thresh)
SCREAMING_SNAKE_CASE = apply_no_ans_threshold(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.na_prob_thresh)
SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase)
if has_ans_qids:
SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase , qid_list=_UpperCAmelCase)
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'HasAns')
if no_ans_qids:
SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase , qid_list=_UpperCAmelCase)
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'NoAns')
if OPTS.na_prob_file:
find_all_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir)
histogram_na_prob(_UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir , 'hasAns')
histogram_na_prob(_UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir , 'noAns')
if OPTS.out_file:
with open(OPTS.out_file , 'w') as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase)
else:
print(json.dumps(_UpperCAmelCase , indent=2))
if __name__ == "__main__":
a_ : Any = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
| 73 | 0 |
from __future__ import annotations
import queue
class lowercase_ :
def __init__( self , lowercase_) -> Dict:
a__ =data
a__ =None
a__ =None
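# Build a binary tree in level order from interactive user input.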
def _lowercase( ):
print('\n********Press N to stop entering at any point of time********\n' )
a__ =input('Enter the value of the root node: ' ).strip().lower()
a__ =queue.Queue()
a__ =TreeNode(int(__a ) )
q.put(__a )
while not q.empty():
a__ =q.get()
a__ =f"""Enter the left node of {node_found.data}: """
a__ =input(__a ).strip().lower() or 'n'
if check == "n":
return tree_node
a__ =TreeNode(int(__a ) )
a__ =left_node
q.put(__a )
a__ =f"""Enter the right node of {node_found.data}: """
a__ =input(__a ).strip().lower() or 'n'
if check == "n":
return tree_node
a__ =TreeNode(int(__a ) )
a__ =right_node
q.put(__a )
    raise RuntimeError('Input ended before the binary tree was fully built')
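# Pre-order traversal: root, then left subtree, then right subtree.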
def _lowercase( __a : TreeNode ):
if not isinstance(__a , __a ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
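# In-order traversal: left subtree, root, then right subtree.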
def _lowercase( __a : TreeNode ):
if not isinstance(__a , __a ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
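# Post-order traversal: left subtree, right subtree, then root.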
def _lowercase( __a : TreeNode ):
if not isinstance(__a , __a ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
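# Level-order (breadth-first) traversal using a queue.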
def _lowercase( __a : TreeNode ):
if not isinstance(__a , __a ) or not node:
return
a__ =queue.Queue()
q.put(__a )
while not q.empty():
a__ =q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
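# Level-order traversal that prints one tree level per line.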
def _lowercase( __a : TreeNode ):
if not isinstance(__a , __a ) or not node:
return
a__ =queue.Queue()
q.put(__a )
while not q.empty():
a__ =[]
while not q.empty():
a__ =q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(__a )
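# Iterative pre-order traversal using an explicit stack.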
def _lowercase( __a : TreeNode ):
if not isinstance(__a , __a ) or not node:
return
a__ =[]
a__ =node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(__a )
a__ =n.left
# end of while means current node doesn't have left child
a__ =stack.pop()
# start to traverse its right child
a__ =n.right
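# Iterative in-order traversal using an explicit stack.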
def _lowercase( __a : TreeNode ):
if not isinstance(__a , __a ) or not node:
return
a__ =[]
a__ =node
while n or stack:
while n:
stack.append(__a )
a__ =n.left
a__ =stack.pop()
print(n.data , end=',' )
a__ =n.right
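# Iterative post-order traversal using two stacks (the reverse of a modified pre-order).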
def _lowercase( __a : TreeNode ):
if not isinstance(__a , __a ) or not node:
return
a__ , a__ =[], []
a__ =node
stacka.append(__a )
while stacka: # to find the reversed order of post order, store it in stack2
a__ =stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(__a )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=',' )
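# Center a banner string within a line of separator characters.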
def _lowercase( __a : str = "" , __a : Optional[Any]=50 , __a : str="*" ):
if not s:
return "\n" + width * char
a__ , a__ =divmod(width - len(__a ) - 2 , 2 )
return f"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
_lowerCAmelCase: TreeNode = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
| 20 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
a_ : Dict = logging.get_logger(__name__)
class _snake_case ( A__ ):
def __init__( self , *a , **a) -> None:
warnings.warn(
'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use GLPNImageProcessor instead.' , a , )
super().__init__(*a , **a)
| 73 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
@slow
def A__ ( self :List[str] ):
'''simple docstring'''
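        # Score "Hi I am" given "Hello there" with google/mt5-small and compare the summed log-likelihood to a reference value.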
__magic_name__ : List[str] =AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=__snake_case ).to(__snake_case )
__magic_name__ : Any =AutoTokenizer.from_pretrained("""google/mt5-small""" )
__magic_name__ : Dict =tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids
__magic_name__ : Tuple =tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids
__magic_name__ : List[Any] =model(input_ids.to(__snake_case ) , labels=labels.to(__snake_case ) ).loss
__magic_name__ : List[Any] =-(labels.shape[-1] * loss.item())
__magic_name__ : Optional[int] =-84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 21 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _snake_case ( unittest.TestCase , A__ ):
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = load_tool('text-classification')
self.tool.setup()
SCREAMING_SNAKE_CASE = load_tool('text-classification' , remote=a)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = self.tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(a , 'positive')
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.remote_tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(a , 'positive')
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(a , 'positive')
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(a , 'positive')
| 73 | 0 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class A ( _a ):
lowercase_ = 'Wav2Vec2FeatureExtractor'
lowercase_ = 'AutoTokenizer'
def __init__( self : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
_a = self.feature_extractor
_a = False
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : Optional[Any] ) -> Tuple:
"""simple docstring"""
try:
return super().from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
except OSError:
warnings.warn(
F'Loading a tokenizer inside {cls.__name__} from a config that does not'
''' include a `tokenizer_class` attribute is deprecated and will be '''
'''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
''' attribute to either your `config.json` or `tokenizer_config.json` '''
'''file to suppress this warning: ''' , lowerCAmelCase_ , )
_a = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = WavaVecaCTCTokenizer.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
return cls(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ )
def __call__( self : str , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Any ) -> Optional[int]:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase_ , **lowerCAmelCase_ )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
_a = kwargs.pop('''raw_speech''' )
else:
_a = kwargs.pop('''audio''' , lowerCAmelCase_ )
_a = kwargs.pop('''sampling_rate''' , lowerCAmelCase_ )
_a = kwargs.pop('''text''' , lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
_a = args[0]
_a = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
_a = self.feature_extractor(lowerCAmelCase_ , *lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , **lowerCAmelCase_ )
if text is not None:
_a = self.tokenizer(lowerCAmelCase_ , **lowerCAmelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_a = encodings['''input_ids''']
return inputs
def __lowerCAmelCase ( self : str , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : Any ) -> Any:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*lowerCAmelCase_ , **lowerCAmelCase_ )
_a = kwargs.pop('''input_features''' , lowerCAmelCase_ )
_a = kwargs.pop('''labels''' , lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
_a = args[0]
_a = args[1:]
if input_features is not None:
_a = self.feature_extractor.pad(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ )
if labels is not None:
_a = self.tokenizer.pad(lowerCAmelCase_ , **lowerCAmelCase_ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_a = labels['''input_ids''']
return input_features
def __lowerCAmelCase ( self : Optional[int] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : List[Any] ) -> List[str]:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@contextmanager
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
_a = True
_a = self.tokenizer
yield
_a = self.feature_extractor
_a = False
| 22 |
import sys
import turtle
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
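# Recursively draw a Sierpinski triangle, subdividing each edge at its midpoint.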
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1])
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1])
my_pen.goto(vertexa[0] , vertexa[1])
my_pen.goto(vertexa[0] , vertexa[1])
if depth == 0:
return
triangle(_UpperCAmelCase , get_mid(_UpperCAmelCase , _UpperCAmelCase) , get_mid(_UpperCAmelCase , _UpperCAmelCase) , depth - 1)
triangle(_UpperCAmelCase , get_mid(_UpperCAmelCase , _UpperCAmelCase) , get_mid(_UpperCAmelCase , _UpperCAmelCase) , depth - 1)
triangle(_UpperCAmelCase , get_mid(_UpperCAmelCase , _UpperCAmelCase) , get_mid(_UpperCAmelCase , _UpperCAmelCase) , depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
a_ : Any = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
a_ : str = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 73 | 0 |
import string
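# Brute-force a Caesar cipher by printing the candidate plaintext for every possible key.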
def _snake_case (__lowercase):
for key in range(len(string.ascii_uppercase)):
UpperCamelCase_ = ''
for symbol in message:
if symbol in string.ascii_uppercase:
UpperCamelCase_ = string.ascii_uppercase.find(__lowercase)
UpperCamelCase_ = num - key
if num < 0:
UpperCamelCase_ = num + len(string.ascii_uppercase)
UpperCamelCase_ = translated + string.ascii_uppercase[num]
else:
UpperCamelCase_ = translated + symbol
print(f"""Decryption using Key #{key}: {translated}""")
def _snake_case ():
UpperCamelCase_ = input('Encrypted message: ')
UpperCamelCase_ = message.upper()
decrypt(__lowercase)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 23 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
a_ : Any = 'true'
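# Build a regression model, a DDP-wrapped copy, and a dataloader prepared by the accelerator.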
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=16):
set_seed(42)
SCREAMING_SNAKE_CASE = RegressionModel()
SCREAMING_SNAKE_CASE = deepcopy(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = RegressionDataset(length=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase)
model.to(accelerator.device)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase)
return model, ddp_model, dataloader
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=False):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased')
SCREAMING_SNAKE_CASE = load_dataset('glue' , 'mrpc' , split='validation')
def tokenize_function(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase)
return outputs
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE = dataset.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column('label' , 'labels')
def collate_fn(_UpperCAmelCase):
if use_longest:
return tokenizer.pad(_UpperCAmelCase , padding='longest' , return_tensors='pt')
return tokenizer.pad(_UpperCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt')
return DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=16)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = Accelerator(dispatch_batches=_UpperCAmelCase , split_batches=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = get_dataloader(_UpperCAmelCase , not dispatch_batches)
SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase)
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = []
for batch in dataloader:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = batch.values()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((logit, target))
logits_and_targets.append((logit, target))
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = [], []
for logit, targ in logits_and_targets:
logits.append(_UpperCAmelCase)
targs.append(_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torch.cat(_UpperCAmelCase), torch.cat(_UpperCAmelCase)
return logits, targs
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=16):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_basic_setup(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = generate_predictions(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
assert (
len(_UpperCAmelCase) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_UpperCAmelCase)}'''
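# Compare GLUE/MRPC metrics from a single-process baseline against the distributed run.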
def lowerCamelCase__ (_UpperCAmelCase = False , _UpperCAmelCase = False):
SCREAMING_SNAKE_CASE = evaluate.load('glue' , 'mrpc')
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_mrpc_setup(_UpperCAmelCase , _UpperCAmelCase)
# First do baseline
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['no']
model.to(_UpperCAmelCase)
model.eval()
for batch in dataloader:
batch.to(_UpperCAmelCase)
with torch.inference_mode():
SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1)
metric.add_batch(predictions=_UpperCAmelCase , references=batch['labels'])
SCREAMING_SNAKE_CASE = metric.compute()
# Then do distributed
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1)
SCREAMING_SNAKE_CASE = batch['labels']
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((preds, references))
metric.add_batch(predictions=_UpperCAmelCase , references=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key]), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**')
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''')
test_mrpc(_UpperCAmelCase , _UpperCAmelCase)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**')
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase)
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''')
test_torch_metrics(_UpperCAmelCase , 99)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**')
SCREAMING_SNAKE_CASE = Accelerator()
test_torch_metrics(_UpperCAmelCase , 512)
accelerator.state._reset_state()
def lowerCamelCase__ (_UpperCAmelCase):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 73 | 0 |
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
UpperCAmelCase_ : str = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
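# First DFS pass: record vertices in order of completion (finish times).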
def _UpperCamelCase (_lowerCamelCase : dict[int, list[int]] , _lowerCamelCase : int , _lowerCamelCase : list[bool] )-> list[int]:
'''simple docstring'''
__snake_case = True
__snake_case = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
order.append(_lowerCamelCase )
return order
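# Second DFS pass on the reversed graph collects one strongly connected component.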
def _UpperCamelCase (_lowerCamelCase : dict[int, list[int]] , _lowerCamelCase : int , _lowerCamelCase : list[bool] )-> list[int]:
'''simple docstring'''
__snake_case = True
__snake_case = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return component
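# Kosaraju's algorithm: order vertices by finish time, then DFS the reversed graph in reverse finish order.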
def _UpperCamelCase (_lowerCamelCase : dict[int, list[int]] )-> list[list[int]]:
'''simple docstring'''
__snake_case = len(_lowerCamelCase ) * [False]
__snake_case = {vert: [] for vert in range(len(_lowerCamelCase ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(_lowerCamelCase )
__snake_case = []
for i, was_visited in enumerate(_lowerCamelCase ):
if not was_visited:
order += topology_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__snake_case = []
__snake_case = len(_lowerCamelCase ) * [False]
for i in range(len(_lowerCamelCase ) ):
__snake_case = order[len(_lowerCamelCase ) - i - 1]
if not visited[vert]:
__snake_case = find_components(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
components_list.append(_lowerCamelCase )
return components_list
| 24 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
a_ : List[str] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[Any] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
a_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 73 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 25 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
a_ : str = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
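# Expand the dataset list into named test-case parameters, optionally keyed by config name.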
def lowerCamelCase__ (_UpperCAmelCase=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=A__ ) )
class _snake_case ( A__ ):
_lowercase : Optional[Any] = None
_lowercase : Optional[Any] = None
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Optional[Any]:
with TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE = dataset_module_factory(a , cache_dir=a)
SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , dataset=a)
SCREAMING_SNAKE_CASE = builder_cls(
cache_dir=a , config_name=a , hash=dataset_module.hash , )
SCREAMING_SNAKE_CASE = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=a).replace(os.sep , '/'),
config.DATASET_INFO_FILENAME,
])
SCREAMING_SNAKE_CASE = cached_path(a , cache_dir=a)
self.assertTrue(os.path.exists(a))
@pytest.mark.integration
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('test_hf_gcp') / 'test_wikipedia_simple'
SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path)
SCREAMING_SNAKE_CASE = builder_cls(
cache_dir=_UpperCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
SCREAMING_SNAKE_CASE = None
builder_instance.download_and_prepare()
SCREAMING_SNAKE_CASE = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , dataset=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = builder_cls(
cache_dir=_UpperCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
SCREAMING_SNAKE_CASE = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_UpperCAmelCase , _UpperCAmelCase)
assert "train" in ds
assert isinstance(ds['train'] , _UpperCAmelCase)
assert next(iter(ds['train']))
| 73 | 0 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _A ( __lowercase ):
def lowercase__ ( self : Any ) -> str:
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(__magic_name__ )
def lowercase__ ( self : str ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = self._create_example_records()
__snake_case : str = Dataset.from_list(__magic_name__ )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(__magic_name__ ):
self.assertDictEqual(__magic_name__ , example_records[i] )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = self._create_example_records()
__snake_case : Dict = Dataset.from_list(__magic_name__ )
__snake_case : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def lowercase__ ( self : str ) -> List[Any]: # checks what happens with missing columns
"""simple docstring"""
__snake_case : Union[str, Any] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
__snake_case : Optional[int] = Dataset.from_list(__magic_name__ )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def lowercase__ ( self : List[str] ) -> Optional[Any]: # checks if the type can be inferred from the second record
"""simple docstring"""
__snake_case : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
__snake_case : int = Dataset.from_list(__magic_name__ )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def lowercase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = Dataset.from_list([] )
self.assertEqual(len(__magic_name__ ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 26 |
from __future__ import annotations
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(_UpperCAmelCase)
if n > 1:
factors.append(_UpperCAmelCase)
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__A : Any = logging.get_logger(__name__)
__A : Any = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
__A : int = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
__A : Dict = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class lowerCamelCase( __snake_case ):
'''simple docstring'''
__magic_name__ = 'whisper'
__magic_name__ = ['past_key_values']
__magic_name__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , snake_case_=5_1865 , snake_case_=80 , snake_case_=6 , snake_case_=4 , snake_case_=6 , snake_case_=4 , snake_case_=1536 , snake_case_=1536 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=5_0257 , snake_case_=True , snake_case_=True , snake_case_="gelu" , snake_case_=256 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.02 , snake_case_=False , snake_case_=1500 , snake_case_=448 , snake_case_=5_0256 , snake_case_=5_0256 , snake_case_=5_0256 , snake_case_=None , snake_case_=[220, 5_0256] , snake_case_=False , snake_case_=256 , snake_case_=False , snake_case_=0.05 , snake_case_=10 , snake_case_=2 , snake_case_=0.0 , snake_case_=10 , snake_case_=0 , snake_case_=7 , **snake_case_ , ):
_A = vocab_size
_A = num_mel_bins
_A = d_model
_A = encoder_layers
_A = encoder_attention_heads
_A = decoder_layers
_A = decoder_attention_heads
_A = decoder_ffn_dim
_A = encoder_ffn_dim
_A = dropout
_A = attention_dropout
_A = activation_dropout
_A = activation_function
_A = init_std
_A = encoder_layerdrop
_A = decoder_layerdrop
_A = use_cache
_A = encoder_layers
_A = scale_embedding # scale factor will be sqrt(d_model) if True
_A = max_source_positions
_A = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
_A = classifier_proj_size
_A = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_A = apply_spec_augment
_A = mask_time_prob
_A = mask_time_length
_A = mask_time_min_masks
_A = mask_feature_prob
_A = mask_feature_length
_A = mask_feature_min_masks
_A = median_filter_width
super().__init__(
pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , decoder_start_token_id=snake_case_ , suppress_tokens=snake_case_ , begin_suppress_tokens=snake_case_ , **snake_case_ , )
class lowerCamelCase( __snake_case ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self ):
_A = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
_A = {0: 'batch'}
else:
_A = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(snake_case_ , direction='inputs' )
return common_inputs
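    # Build dummy audio features and decoder ids so the model can be traced for ONNX export.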
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , snake_case_ = 2_2050 , snake_case_ = 5.0 , snake_case_ = 220 , ):
_A = OrderedDict()
_A = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=snake_case_ , framework=snake_case_ , sampling_rate=snake_case_ , time_duration=snake_case_ , frequency=snake_case_ , )
_A = encoder_inputs['input_features'].shape[2]
_A = encoder_sequence_length // 2 if self.use_past else seq_length
_A = super().generate_dummy_inputs(
preprocessor.tokenizer , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
_A = encoder_inputs.pop('input_features' )
_A = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
_A = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def lowerCAmelCase__ ( self ):
return 1E-3
| 27 |
import math
import os
import sys
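# Read a file's raw bytes and return them as a string of bits.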
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = ''
try:
with open(_UpperCAmelCase , 'rb') as binary_file:
SCREAMING_SNAKE_CASE = binary_file.read()
for dat in data:
SCREAMING_SNAKE_CASE = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible')
sys.exit()
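# Update the lexicon after a match: drop the consumed key, left-pad existing codes when the index reaches a power of two, and register the new index.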
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
lexicon.pop(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = last_match_id
if math.loga(_UpperCAmelCase).is_integer():
for curr_key in lexicon:
SCREAMING_SNAKE_CASE = '0' + lexicon[curr_key]
SCREAMING_SNAKE_CASE = bin(_UpperCAmelCase)[2:]
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = {'0': '0', '1': '1'}
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = '', ''
SCREAMING_SNAKE_CASE = len(_UpperCAmelCase)
for i in range(len(_UpperCAmelCase)):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
SCREAMING_SNAKE_CASE = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
index += 1
SCREAMING_SNAKE_CASE = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
SCREAMING_SNAKE_CASE = lexicon[curr_string]
result += last_match_id
return result
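# Prepend a self-delimiting header encoding the original file size to the compressed bits.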
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = os.path.getsize(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = bin(_UpperCAmelCase)[2:]
SCREAMING_SNAKE_CASE = len(_UpperCAmelCase)
return "0" * (length_length - 1) + file_length_binary + compressed
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = 8
try:
with open(_UpperCAmelCase , 'wb') as opened_file:
SCREAMING_SNAKE_CASE = [
to_write[i : i + byte_length]
for i in range(0 , len(_UpperCAmelCase) , _UpperCAmelCase)
]
if len(result_byte_array[-1]) % byte_length == 0:
result_byte_array.append('10000000')
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1]) - 1
)
for elem in result_byte_array:
opened_file.write(int(_UpperCAmelCase , 2).to_bytes(1 , byteorder='big'))
except OSError:
print('File not accessible')
sys.exit()
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = read_file_binary(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = compress_data(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = add_file_length(_UpperCAmelCase , _UpperCAmelCase)
write_file_binary(_UpperCAmelCase , _UpperCAmelCase)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 73 | 0 |
'''simple docstring'''
from __future__ import annotations
def lowercase__( __UpperCamelCase: int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = 2
SCREAMING_SNAKE_CASE : Tuple = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(__UpperCamelCase )
if n > 1:
factors.append(__UpperCamelCase )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 |
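The factorization row above tests every integer divisor in turn. A common refinement, sketched here with names of my own choosing, strips out factors of 2 once and then trial-divides only by odd candidates, roughly halving the work:

def prime_factors(n: int) -> list[int]:
    # Handle the only even prime up front ...
    factors = []
    while n % 2 == 0:
        factors.append(2)
        n //= 2
    # ... then trial-divide by odd numbers up to sqrt(n).
    divisor = 3
    while divisor * divisor <= n:
        while n % divisor == 0:
            factors.append(divisor)
            n //= divisor
        divisor += 2
    if n > 1:
        factors.append(n)  # whatever remains is itself prime
    return factors

For example, prime_factors(360) returns [2, 2, 2, 3, 3, 5].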
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def lowerCamelCase__ (_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))
def lowerCamelCase__ (_outputs):
    SCREAMING_SNAKE_CASE = np.max(_outputs , axis=-1 , keepdims=True)
    SCREAMING_SNAKE_CASE = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True)
class _snake_case ( A__ ):
_lowercase : Tuple = '''sigmoid'''
_lowercase : List[str] = '''softmax'''
_lowercase : Tuple = '''none'''
@add_end_docstrings(
A__ , R'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class _snake_case ( A__ ):
_lowercase : Optional[Any] = False
_lowercase : Tuple = ClassificationFunction.NONE
def __init__( self , **a) -> Optional[Any]:
super().__init__(**a)
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)
def SCREAMING_SNAKE_CASE__ ( self , a=None , a=None , a="" , **a) -> Tuple:
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
SCREAMING_SNAKE_CASE = tokenizer_kwargs
SCREAMING_SNAKE_CASE = {}
if hasattr(self.model.config , 'return_all_scores') and return_all_scores is None:
SCREAMING_SNAKE_CASE = self.model.config.return_all_scores
if isinstance(a , a) or top_k is None:
SCREAMING_SNAKE_CASE = top_k
SCREAMING_SNAKE_CASE = False
elif return_all_scores is not None:
warnings.warn(
                '`return_all_scores` is now deprecated, if you want similar functionality use `top_k=None` instead of'
                ' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , a , )
if return_all_scores:
SCREAMING_SNAKE_CASE = None
else:
SCREAMING_SNAKE_CASE = 1
if isinstance(a , a):
SCREAMING_SNAKE_CASE = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
SCREAMING_SNAKE_CASE = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *a , **a) -> Optional[int]:
SCREAMING_SNAKE_CASE = super().__call__(*a , **a)
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
SCREAMING_SNAKE_CASE = 'top_k' not in kwargs
if isinstance(args[0] , a) and _legacy:
            # This pipeline is odd, and returns a list when a single item is run
return [result]
else:
return result
def SCREAMING_SNAKE_CASE__ ( self , a , **a) -> Dict[str, GenericTensor]:
SCREAMING_SNAKE_CASE = self.framework
if isinstance(a , a):
return self.tokenizer(**a , return_tensors=a , **a)
elif isinstance(a , a) and len(a) == 1 and isinstance(inputs[0] , a) and len(inputs[0]) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=a , **a)
elif isinstance(a , a):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.')
return self.tokenizer(a , return_tensors=a , **a)
def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[Any]:
return self.model(**a)
def SCREAMING_SNAKE_CASE__ ( self , a , a=None , a=1 , a=True) -> Any:
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
SCREAMING_SNAKE_CASE = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
SCREAMING_SNAKE_CASE = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , 'function_to_apply') and function_to_apply is None:
SCREAMING_SNAKE_CASE = self.model.config.function_to_apply
else:
SCREAMING_SNAKE_CASE = ClassificationFunction.NONE
SCREAMING_SNAKE_CASE = model_outputs['logits'][0]
SCREAMING_SNAKE_CASE = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
SCREAMING_SNAKE_CASE = sigmoid(a)
elif function_to_apply == ClassificationFunction.SOFTMAX:
SCREAMING_SNAKE_CASE = softmax(a)
elif function_to_apply == ClassificationFunction.NONE:
SCREAMING_SNAKE_CASE = outputs
else:
raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''')
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
SCREAMING_SNAKE_CASE = [
{'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(a)
]
if not _legacy:
dict_scores.sort(key=lambda a: x["score"] , reverse=a)
if top_k is not None:
SCREAMING_SNAKE_CASE = dict_scores[:top_k]
return dict_scores
| 73 | 0 |
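The postprocess step above shifts the logits by their row maximum before exponentiating. A cleaned-up sketch of that numerically stable softmax (variable names are mine; the behavior mirrors the obfuscated helpers in the row):

import numpy as np

def stable_softmax(logits: np.ndarray) -> np.ndarray:
    # Subtracting the row-wise max before exponentiating prevents overflow;
    # the constant shift cancels out after normalization.
    maxes = np.max(logits, axis=-1, keepdims=True)
    shifted_exp = np.exp(logits - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)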
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
A_ = logging.get_logger(__name__)
@dataclass
class __lowerCamelCase :
a__: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
a__: str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
a__: int = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.task_name.lower()
class __lowerCamelCase ( lowerCAmelCase ):
a__: Dict = 'train'
a__: str = 'dev'
a__: Union[str, Any] = 'test'
class __lowerCamelCase ( lowerCAmelCase ):
a__: GlueDataTrainingArguments
a__: str
a__: List[InputFeatures]
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = Split.train , UpperCAmelCase = None , ):
warnings.warn(
'''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , UpperCAmelCase , )
lowerCamelCase_ = args
lowerCamelCase_ = glue_processors[args.task_name]()
lowerCamelCase_ = glue_output_modes[args.task_name]
if isinstance(UpperCAmelCase , UpperCAmelCase ):
try:
lowerCamelCase_ = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
# Load data features from cache or dataset file
lowerCamelCase_ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}" , )
lowerCamelCase_ = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase_ , lowerCamelCase_ = label_list[2], label_list[1]
lowerCamelCase_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ = cached_features_file + '''.lock'''
with FileLock(UpperCAmelCase ):
if os.path.exists(UpperCAmelCase ) and not args.overwrite_cache:
lowerCamelCase_ = time.time()
lowerCamelCase_ = torch.load(UpperCAmelCase )
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
else:
logger.info(f"Creating features from dataset file at {args.data_dir}" )
if mode == Split.dev:
lowerCamelCase_ = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
lowerCamelCase_ = self.processor.get_test_examples(args.data_dir )
else:
lowerCamelCase_ = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
lowerCamelCase_ = examples[:limit_length]
lowerCamelCase_ = glue_convert_examples_to_features(
UpperCAmelCase , UpperCAmelCase , max_length=args.max_seq_length , label_list=UpperCAmelCase , output_mode=self.output_mode , )
lowerCamelCase_ = time.time()
torch.save(self.features , UpperCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__( self ):
return len(self.features )
def __getitem__( self , UpperCAmelCase ):
return self.features[i]
def UpperCAmelCase__ ( self ):
return self.label_list
| 29 |
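The dataset class above wraps its feature cache in a file lock so that, under distributed training, only one process builds the cache while the others wait and then load it. A minimal sketch of that pattern (the helper name and the build_fn argument are hypothetical):

import os

import torch
from filelock import FileLock

def load_or_build(cache_file: str, build_fn, overwrite: bool = False):
    # The first process to acquire the lock builds and saves the features;
    # any process arriving later finds the file on disk and simply loads it.
    with FileLock(cache_file + ".lock"):
        if os.path.exists(cache_file) and not overwrite:
            return torch.load(cache_file)
        features = build_fn()
        torch.save(features, cache_file)
        return features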
import heapq as hq
import math
from collections.abc import Iterator
class _snake_case :
def __init__( self , a) -> Optional[Any]:
SCREAMING_SNAKE_CASE = str(id_)
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = {} # {vertex:distance}
def __lt__( self , a) -> Dict:
return self.key < other.key
def __repr__( self) -> Optional[Any]:
return self.id
def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[Any]:
self.neighbors.append(a)
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Tuple:
SCREAMING_SNAKE_CASE = weight
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1])
graph[b - 1].add_neighbor(graph[a - 1])
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , _UpperCAmelCase)
graph[b - 1].add_edge(graph[a - 1] , _UpperCAmelCase)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = []
for u in graph:
SCREAMING_SNAKE_CASE = math.inf
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = graph[:]
while q:
SCREAMING_SNAKE_CASE = min(_UpperCAmelCase)
q.remove(_UpperCAmelCase)
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
SCREAMING_SNAKE_CASE = u
SCREAMING_SNAKE_CASE = u.edges[v.id]
for i in range(1 , len(_UpperCAmelCase)):
a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
return a
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
for u in graph:
SCREAMING_SNAKE_CASE = math.inf
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = list(_UpperCAmelCase)
hq.heapify(_UpperCAmelCase)
while h:
SCREAMING_SNAKE_CASE = hq.heappop(_UpperCAmelCase)
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
SCREAMING_SNAKE_CASE = u
SCREAMING_SNAKE_CASE = u.edges[v.id]
hq.heapify(_UpperCAmelCase)
for i in range(1 , len(_UpperCAmelCase)):
yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def lowerCamelCase__ ():
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 0 |
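Both Prim variants above rescan or re-heapify after every key update, which costs linear time per update. A standard alternative, sketched below over an adjacency-dict graph of my own devising, pushes candidate edges onto a heap and lazily discards stale entries:

import heapq

def prim_mst(graph: dict[str, dict[str, int]], start: str) -> list[tuple[str, str, int]]:
    # graph maps each vertex to {neighbor: edge_weight}.
    visited = {start}
    frontier = [(w, start, v) for v, w in graph[start].items()]
    heapq.heapify(frontier)
    mst = []
    while frontier and len(visited) < len(graph):
        weight, u, v = heapq.heappop(frontier)
        if v in visited:
            continue  # stale entry: v was already reached via a cheaper edge
        visited.add(v)
        mst.append((u, v, weight))
        for nxt, w in graph[v].items():
            if nxt not in visited:
                heapq.heappush(frontier, (w, v, nxt))
    return mst

For graph = {'a': {'b': 1, 'c': 4}, 'b': {'a': 1, 'c': 2}, 'c': {'a': 4, 'b': 2}}, prim_mst(graph, 'a') returns [('a', 'b', 1), ('b', 'c', 2)].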
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , _a , )
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = RobertaConfig
lowerCAmelCase = '''roberta'''
def __init__( self ,_SCREAMING_SNAKE_CASE ) -> Dict:
super().__init__(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = RobertaEmbeddings(_SCREAMING_SNAKE_CASE )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , _a , )
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = RobertaConfig
lowerCAmelCase = '''roberta'''
def __init__( self ,_SCREAMING_SNAKE_CASE ) -> Tuple:
super().__init__(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = config.num_labels
UpperCAmelCase_ : List[str] = config.num_hidden_layers
UpperCAmelCase_ : List[Any] = DeeRobertaModel(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = nn.Dropout(config.hidden_dropout_prob )
UpperCAmelCase_ : Tuple = nn.Linear(config.hidden_size ,self.config.num_labels )
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=-1 ,_SCREAMING_SNAKE_CASE=False ,) -> int:
UpperCAmelCase_ : Union[str, Any] = self.num_layers
try:
UpperCAmelCase_ : Any = self.roberta(
_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,token_type_ids=_SCREAMING_SNAKE_CASE ,position_ids=_SCREAMING_SNAKE_CASE ,head_mask=_SCREAMING_SNAKE_CASE ,inputs_embeds=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : Optional[Any] = outputs[1]
UpperCAmelCase_ : Optional[Any] = self.dropout(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = self.classifier(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
UpperCAmelCase_ : Tuple = e.message
UpperCAmelCase_ : Optional[int] = e.exit_layer
UpperCAmelCase_ : str = outputs[0]
if not self.training:
UpperCAmelCase_ : int = entropy(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : List[str] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
UpperCAmelCase_ : List[str] = MSELoss()
UpperCAmelCase_ : Union[str, Any] = loss_fct(logits.view(-1 ) ,labels.view(-1 ) )
else:
UpperCAmelCase_ : str = CrossEntropyLoss()
UpperCAmelCase_ : Union[str, Any] = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
# work with highway exits
UpperCAmelCase_ : Union[str, Any] = []
for highway_exit in outputs[-1]:
UpperCAmelCase_ : Any = highway_exit[0]
if not self.training:
highway_logits_all.append(_SCREAMING_SNAKE_CASE )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
UpperCAmelCase_ : List[str] = MSELoss()
UpperCAmelCase_ : List[Any] = loss_fct(highway_logits.view(-1 ) ,labels.view(-1 ) )
else:
UpperCAmelCase_ : Optional[int] = CrossEntropyLoss()
UpperCAmelCase_ : Optional[Any] = loss_fct(highway_logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
highway_losses.append(_SCREAMING_SNAKE_CASE )
if train_highway:
UpperCAmelCase_ : str = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
UpperCAmelCase_ : List[Any] = (loss,) + outputs
if not self.training:
UpperCAmelCase_ : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
UpperCAmelCase_ : Optional[Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
        return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 30 |
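The highway branches above exit early once an intermediate layer is already confident. A minimal sketch of an entropy-based exit test (the threshold and shapes are illustrative, not taken from the model):

import torch

def should_exit(logits: torch.Tensor, threshold: float = 0.5) -> bool:
    # Exit when the softmax entropy of the layer's prediction is low,
    # i.e. the intermediate classifier is already confident.
    probs = torch.softmax(logits, dim=-1)
    entropy = -(probs * torch.log(probs + 1e-12)).sum(dim=-1)
    return bool((entropy < threshold).all())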
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ : Optional[Any] = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Union[str, Any] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
a_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 73 | 0 |
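The import shim above defers heavy submodule imports until an attribute is first used. A stripped-down sketch of the _LazyModule idea (the real class also handles __dir__, module specs, and error reporting):

import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve attributes to submodule imports on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value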
def UpperCAmelCase_ ( __UpperCAmelCase : int ) -> int:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ), f"The input value of [n={number}] is not an integer"
if number == 1:
return 2
elif number < 1:
SCREAMING_SNAKE_CASE_ = f"The input value of [n={number}] has to be > 0"
raise ValueError(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE_ = sylvester(number - 1 )
SCREAMING_SNAKE_CASE_ = num - 1
SCREAMING_SNAKE_CASE_ = num
return lower * upper + 1
if __name__ == "__main__":
    print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
| 31 |
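The Sylvester implementation above recurses once per term. An equivalent iterative sketch of the recurrence a(1) = 2, a(n) = a(n-1)**2 - a(n-1) + 1, with naming of my own:

def sylvester_iterative(n: int) -> int:
    # a(1) = 2; a(k) = a(k-1) * (a(k-1) - 1) + 1, with no recursion depth limit.
    assert isinstance(n, int) and n >= 1, "n must be a positive integer"
    term = 2
    for _ in range(n - 1):
        term = term * (term - 1) + 1
    return term

The first terms are 2, 3, 7, 43, matching the recursive version.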
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
a_ : Union[str, Any] = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _snake_case ( A__ ):
_lowercase : Optional[Any] = '''decision_transformer'''
_lowercase : str = ['''past_key_values''']
_lowercase : Union[str, Any] = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , a=17 , a=4 , a=128 , a=4096 , a=True , a=1 , a=1024 , a=3 , a=1 , a=None , a="relu" , a=0.1 , a=0.1 , a=0.1 , a=1E-5 , a=0.02 , a=True , a=True , a=5_0256 , a=5_0256 , a=False , a=False , **a , ) -> List[str]:
SCREAMING_SNAKE_CASE = state_dim
SCREAMING_SNAKE_CASE = act_dim
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = max_ep_len
SCREAMING_SNAKE_CASE = action_tanh
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = n_positions
SCREAMING_SNAKE_CASE = n_layer
SCREAMING_SNAKE_CASE = n_head
SCREAMING_SNAKE_CASE = n_inner
SCREAMING_SNAKE_CASE = activation_function
SCREAMING_SNAKE_CASE = resid_pdrop
SCREAMING_SNAKE_CASE = embd_pdrop
SCREAMING_SNAKE_CASE = attn_pdrop
SCREAMING_SNAKE_CASE = layer_norm_epsilon
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scale_attn_weights
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE = bos_token_id
SCREAMING_SNAKE_CASE = eos_token_id
super().__init__(bos_token_id=a , eos_token_id=a , **a)
| 73 | 0 |
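The attribute map above lets canonical names like max_position_embeddings resolve to GPT-2-style fields like n_positions. A minimal sketch of that aliasing (simplified; the real PretrainedConfig also remaps names on assignment):

class AliasedConfig:
    attribute_map = {"max_position_embeddings": "n_positions"}

    def __init__(self, n_positions: int = 1024):
        self.n_positions = n_positions

    def __getattr__(self, name):
        # Only called when normal lookup fails: route aliases to real fields.
        alias = type(self).attribute_map.get(name)
        if alias is not None:
            return getattr(self, alias)
        raise AttributeError(name)

assert AliasedConfig().max_position_embeddings == 1024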
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase_ = 16
UpperCAmelCase_ = 32
def A__ ( SCREAMING_SNAKE_CASE_ : Accelerator , SCREAMING_SNAKE_CASE_ : int = 16 ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
_UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(SCREAMING_SNAKE_CASE_ : Dict ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_UpperCAmelCase = datasets.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(SCREAMING_SNAKE_CASE_ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
_UpperCAmelCase = 8
else:
_UpperCAmelCase = None
return tokenizer.pad(
SCREAMING_SNAKE_CASE_ , padding='''longest''' , max_length=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , )
# Instantiate dataloaders.
_UpperCAmelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase_ = mocked_dataloaders # noqa: F811
def A__ ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , SCREAMING_SNAKE_CASE_ ) == "1":
_UpperCAmelCase = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
_UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
_UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase = config['''lr''']
_UpperCAmelCase = int(config['''num_epochs'''] )
_UpperCAmelCase = int(config['''seed'''] )
_UpperCAmelCase = int(config['''batch_size'''] )
set_seed(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase , _UpperCAmelCase = get_dataloaders(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
_UpperCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
_UpperCAmelCase = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=SCREAMING_SNAKE_CASE_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
_UpperCAmelCase = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ )
# Instantiate scheduler
_UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=1_00 , num_training_steps=(len(SCREAMING_SNAKE_CASE_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
_UpperCAmelCase = os.path.split(SCREAMING_SNAKE_CASE_ )[-1].split('''.''' )[0]
accelerator.init_trackers(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE_ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
_UpperCAmelCase = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
_UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = outputs.logits.argmax(dim=-1 )
_UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , )
_UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , SCREAMING_SNAKE_CASE_ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(SCREAMING_SNAKE_CASE_ ),
'''epoch''': epoch,
} , step=SCREAMING_SNAKE_CASE_ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def A__ ( ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
        '''--project_dir''' , type=SCREAMING_SNAKE_CASE_ , default='''logs''' , help='''Where to store experiment tracking logs and relevant project information''' , )
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
    main()
| 32 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ : Optional[int] = 16
a_ : Any = 32
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase = 16):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('bert-base-cased')
SCREAMING_SNAKE_CASE = load_dataset('glue' , 'mrpc')
def tokenize_function(_UpperCAmelCase):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column('label' , 'labels')
def collate_fn(_UpperCAmelCase):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE = 8
else:
SCREAMING_SNAKE_CASE = None
return tokenizer.pad(
_UpperCAmelCase , padding='longest' , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors='pt' , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets['train'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase , drop_last=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets['validation'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase , drop_last=(accelerator.mixed_precision == 'fp8') , )
return train_dataloader, eval_dataloader
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
# Initialize accelerator
SCREAMING_SNAKE_CASE = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE = config['lr']
SCREAMING_SNAKE_CASE = int(config['num_epochs'])
SCREAMING_SNAKE_CASE = int(config['seed'])
SCREAMING_SNAKE_CASE = int(config['batch_size'])
SCREAMING_SNAKE_CASE = evaluate.load('glue' , 'mrpc')
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE = batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE = MAX_GPU_BATCH_SIZE
set_seed(_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_UpperCAmelCase)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE = model.to(accelerator.device)
# Instantiate optimizer
SCREAMING_SNAKE_CASE = AdamW(params=model.parameters() , lr=_UpperCAmelCase)
# Instantiate scheduler
SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup(
optimizer=_UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_UpperCAmelCase) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# Now we train the model
for epoch in range(_UpperCAmelCase):
model.train()
for step, batch in enumerate(_UpperCAmelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
SCREAMING_SNAKE_CASE = outputs.loss
SCREAMING_SNAKE_CASE = loss / gradient_accumulation_steps
accelerator.backward(_UpperCAmelCase)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_UpperCAmelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch['labels']))
metric.add_batch(
predictions=_UpperCAmelCase , references=_UpperCAmelCase , )
SCREAMING_SNAKE_CASE = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _UpperCAmelCase)
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description='Simple example of training script.')
parser.add_argument(
        '--mixed_precision' , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
        'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.')
SCREAMING_SNAKE_CASE = parser.parse_args()
SCREAMING_SNAKE_CASE = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(_UpperCAmelCase , _UpperCAmelCase)
if __name__ == "__main__":
main()
| 73 | 0 |
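Both training scripts above divide the loss by gradient_accumulation_steps and only step the optimizer every N micro-batches. A framework-free sketch of that pattern (the model, batch iterable, and loss are placeholders of my own):

import torch

def train_with_accumulation(model, batches, optimizer, accumulation_steps: int = 4):
    # Scaling each micro-batch loss by 1/N makes the summed gradients equal
    # the gradient of the average loss over the N micro-batches, emulating a
    # larger batch size under a fixed memory budget.
    optimizer.zero_grad()
    for step, (inputs, labels) in enumerate(batches, start=1):
        loss = torch.nn.functional.cross_entropy(model(inputs), labels)
        (loss / accumulation_steps).backward()
        if step % accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()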
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
if len(__lowerCAmelCase ) < k or k < 0:
raise ValueError('''Invalid Input''' )
snake_case__ = snake_case__ = sum(array[:k] )
for i in range(len(__lowerCAmelCase ) - k ):
snake_case__ = current_sum - array[i] + array[i + k]
snake_case__ = max(__lowerCAmelCase , __lowerCAmelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
lowerCamelCase__ : List[Any] = [randint(-1_0_0_0, 1_0_0_0) for i in range(1_0_0)]
lowerCamelCase__ : List[Any] = randint(0, 1_1_0)
print(F"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
| 33 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a_ : int = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
a_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 73 | 0 |
"""simple docstring"""
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = int(_lowercase )
if n_element < 1:
UpperCamelCase = ValueError('''a should be a positive number''' )
raise my_error
UpperCamelCase = [1]
UpperCamelCase , UpperCamelCase , UpperCamelCase = (0, 0, 0)
UpperCamelCase = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 ,hamming_list[j] * 3 ,hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
SCREAMING_SNAKE_CASE_ = hamming(int(n))
print('-----------------------------------------------------')
print(f'The list with nth numbers is: {hamming_numbers}')
    print('-----------------------------------------------------')
| 34 |
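The Hamming generator above advances three pointers over the list it is building. An equivalent min-heap formulation (my own reconstruction) pops the smallest candidate and pushes its 2x, 3x, and 5x successors:

import heapq

def hamming_heap(n_element: int) -> list[int]:
    # Pop the smallest 5-smooth number seen so far; push its multiples,
    # deduplicating with a set so each value enters the heap exactly once.
    heap, seen, out = [1], {1}, []
    while len(out) < n_element:
        value = heapq.heappop(heap)
        out.append(value)
        for p in (2, 3, 5):
            if value * p not in seen:
                seen.add(value * p)
                heapq.heappush(heap, value * p)
    return out

hamming_heap(5) returns [1, 2, 3, 4, 5], like the pointer-based version.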
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False):
if radian_mode:
return [magnitude * cos(_UpperCAmelCase), magnitude * sin(_UpperCAmelCase)]
return [magnitude * cos(radians(_UpperCAmelCase)), magnitude * sin(radians(_UpperCAmelCase))]
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 10**-1):
SCREAMING_SNAKE_CASE = cross(_UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = sum(_UpperCAmelCase)
return abs(_UpperCAmelCase) < eps
if __name__ == "__main__":
# Test to check if it works
a_ : int = array(
[
polar_force(718.4, 1_80 - 30),
polar_force(879.54, 45),
polar_force(1_00, -90),
]
)
a_ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
a_ : Dict = array(
[
polar_force(30 * 9.81, 15),
polar_force(2_15, 1_80 - 45),
polar_force(2_64, 90 - 30),
]
)
a_ : Any = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
a_ : int = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
a_ : Optional[Any] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 73 | 0 |
import fire
from utils import calculate_rouge, save_json
def a ( A__ , A__ , A__=None , **A__ ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = [x.strip() for x in open(A__ ).readlines()]
SCREAMING_SNAKE_CASE__ : int = [x.strip() for x in open(A__ ).readlines()][: len(A__ )]
SCREAMING_SNAKE_CASE__ : Optional[Any] = calculate_rouge(A__ , A__ , **A__ )
if save_path is not None:
save_json(A__ , A__ , indent=A__ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 35 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Optional[int] = logging.get_logger(__name__)
a_ : int = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class _snake_case ( A__ ):
_lowercase : Dict = '''cvt'''
def __init__( self , a=3 , a=[7, 3, 3] , a=[4, 2, 2] , a=[2, 1, 1] , a=[64, 192, 384] , a=[1, 3, 6] , a=[1, 2, 10] , a=[4.0, 4.0, 4.0] , a=[0.0, 0.0, 0.0] , a=[0.0, 0.0, 0.0] , a=[0.0, 0.0, 0.1] , a=[True, True, True] , a=[False, False, True] , a=["dw_bn", "dw_bn", "dw_bn"] , a=[3, 3, 3] , a=[1, 1, 1] , a=[2, 2, 2] , a=[1, 1, 1] , a=[1, 1, 1] , a=0.02 , a=1E-12 , **a , ) -> List[Any]:
super().__init__(**a)
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_sizes
SCREAMING_SNAKE_CASE = patch_stride
SCREAMING_SNAKE_CASE = patch_padding
SCREAMING_SNAKE_CASE = embed_dim
SCREAMING_SNAKE_CASE = num_heads
SCREAMING_SNAKE_CASE = depth
SCREAMING_SNAKE_CASE = mlp_ratio
SCREAMING_SNAKE_CASE = attention_drop_rate
SCREAMING_SNAKE_CASE = drop_rate
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = qkv_bias
SCREAMING_SNAKE_CASE = cls_token
SCREAMING_SNAKE_CASE = qkv_projection_method
SCREAMING_SNAKE_CASE = kernel_qkv
SCREAMING_SNAKE_CASE = padding_kv
SCREAMING_SNAKE_CASE = stride_kv
SCREAMING_SNAKE_CASE = padding_q
SCREAMING_SNAKE_CASE = stride_q
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
| 73 | 0 |
def lowercase ( __A : int ) -> int:
'''simple docstring'''
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
snake_case : Tuple = 1
snake_case : Union[str, Any] = 1
while repunit:
snake_case : Tuple = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def lowercase ( __A : int = 100_0000 ) -> int:
'''simple docstring'''
snake_case : Union[str, Any] = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(__A ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(f'''{solution() = }''')
| 36 |
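The repunit loop above is implicitly computing a multiplicative order: a divisor d divides R(k) = (10**k - 1) // 9 exactly when 10**k ≡ 1 (mod 9d). A sketch making that explicit (my reformulation, not the dataset's code):

def repunit_order(divisor: int) -> int:
    # The answer is the multiplicative order of 10 modulo 9 * divisor;
    # it exists only when the divisor is coprime to 10.
    if divisor % 2 == 0 or divisor % 5 == 0:
        return 0
    modulus = 9 * divisor
    k, power = 1, 10 % modulus
    while power != 1:
        power = power * 10 % modulus
        k += 1
    return k

assert repunit_order(7) == 6  # R(6) = 111111 = 7 * 15873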
def lowerCamelCase__ (_UpperCAmelCase = 10 , _UpperCAmelCase = 1000 , _UpperCAmelCase = True):
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase)
and isinstance(_UpperCAmelCase , _UpperCAmelCase)
and isinstance(_UpperCAmelCase , _UpperCAmelCase)
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError('Invalid value for min_val or max_val (min_value < max_value)')
return min_val if option else max_val
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
return int((number_a + number_a) / 2)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase) and isinstance(_UpperCAmelCase , _UpperCAmelCase) and isinstance(_UpperCAmelCase , _UpperCAmelCase)
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError('argument value for lower and higher must be(lower > higher)')
if not lower < to_guess < higher:
raise ValueError(
'guess value must be within the range of lower and higher value')
def answer(_UpperCAmelCase) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('started...')
SCREAMING_SNAKE_CASE = lower
SCREAMING_SNAKE_CASE = higher
SCREAMING_SNAKE_CASE = []
while True:
SCREAMING_SNAKE_CASE = get_avg(_UpperCAmelCase , _UpperCAmelCase)
last_numbers.append(_UpperCAmelCase)
if answer(_UpperCAmelCase) == "low":
SCREAMING_SNAKE_CASE = number
elif answer(_UpperCAmelCase) == "high":
SCREAMING_SNAKE_CASE = number
else:
break
print(F'''guess the number : {last_numbers[-1]}''')
print(F'''details : {last_numbers!s}''')
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = int(input('Enter lower value : ').strip())
SCREAMING_SNAKE_CASE = int(input('Enter high value : ').strip())
SCREAMING_SNAKE_CASE = int(input('Enter value to guess : ').strip())
guess_the_number(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
if __name__ == "__main__":
main()
| 73 | 0 |
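The guessing game above is plain bisection on an integer interval. A condensed sketch of the same search (it assumes lower < target < higher, exactly the precondition the original validates):

def bisect_guess(lower: int, higher: int, target: int) -> list[int]:
    # Requires lower < target < higher; under that invariant the open
    # interval strictly shrinks every round, so the loop always terminates.
    guesses = []
    while True:
        mid = (lower + higher) // 2
        guesses.append(mid)
        if mid < target:
            lower = mid
        elif mid > target:
            higher = mid
        else:
            return guesses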
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def UpperCamelCase_ ( __a ) -> Dict:
if is_torch_version("<" , "2.0.0" ) or not hasattr(__a , "_dynamo" ):
return False
return isinstance(__a , torch._dynamo.eval_frame.OptimizedModule )
def UpperCamelCase_ ( __a , __a = True ) -> Tuple:
a__ : Union[str, Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
a__ : List[Any] = is_compiled_module(__a )
if is_compiled:
a__ : Optional[int] = model
a__ : List[str] = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__a , __a ):
a__ : int = model.module
if not keep_fpaa_wrapper:
a__ : Union[str, Any] = getattr(__a , "forward" )
a__ : Union[str, Any] = model.__dict__.pop("_original_forward" , __a )
if original_forward is not None:
while hasattr(__a , "__wrapped__" ):
a__ : int = forward.__wrapped__
if forward == original_forward:
break
a__ : Any = forward
if getattr(__a , "_converted_to_transformer_engine" , __a ):
convert_model(__a , to_transformer_engine=__a )
if is_compiled:
a__ : List[str] = model
a__ : Optional[int] = compiled_model
return model
def UpperCamelCase_ ( ) -> int:
PartialState().wait_for_everyone()
def UpperCamelCase_ ( __a , __a ) -> int:
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__a , __a )
elif PartialState().local_process_index == 0:
torch.save(__a , __a )
@contextmanager
def UpperCamelCase_ ( **__a ) -> Optional[int]:
for key, value in kwargs.items():
a__ : int = str(__a )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def UpperCamelCase_ ( __a ) -> Dict:
if not hasattr(__a , "__qualname__" ) and not hasattr(__a , "__name__" ):
a__ : Union[str, Any] = getattr(__a , "__class__" , __a )
if hasattr(__a , "__qualname__" ):
return obj.__qualname__
if hasattr(__a , "__name__" ):
return obj.__name__
return str(__a )
def UpperCamelCase_ ( __a , __a ) -> str:
for key, value in source.items():
if isinstance(__a , __a ):
a__ : Any = destination.setdefault(__a , {} )
merge_dicts(__a , __a )
else:
a__ : List[str] = value
return destination
def UpperCamelCase_ ( __a = None ) -> bool:
if port is None:
a__ : int = 29_500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
| 37 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class _snake_case :
def __init__( self , a , a=13 , a=7 , a=True , a=True , a=False , a=True , a=99 , a=32 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length])
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices)
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , use_stable_embedding=a , )
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a) -> Any:
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a)
SCREAMING_SNAKE_CASE = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> str:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , )
SCREAMING_SNAKE_CASE = model(a , attention_mask=a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> int:
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> str:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=a)
model.to(a)
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size)
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1)
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1)
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1]).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3))
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( A__ , A__ , A__ , unittest.TestCase ):
_lowercase : List[Any] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
_lowercase : str = (OpenLlamaForCausalLM,) if is_torch_available() else ()
_lowercase : List[str] = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : List[str] = False
_lowercase : Optional[int] = False
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self)
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=a , hidden_size=37)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a)
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'single_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a)
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a)
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
pass
@parameterized.expand([('linear',), ('dynamic',)])
def SCREAMING_SNAKE_CASE__ ( self , a) -> Dict:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = ids_tensor([1, 10] , config.vocab_size)
SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(42) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = OpenLlamaModel(a)
original_model.to(a)
original_model.eval()
SCREAMING_SNAKE_CASE = original_model(a).last_hidden_state
SCREAMING_SNAKE_CASE = original_model(a).last_hidden_state
set_seed(42) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE = OpenLlamaModel(a)
scaled_model.to(a)
scaled_model.eval()
SCREAMING_SNAKE_CASE = scaled_model(a).last_hidden_state
SCREAMING_SNAKE_CASE = scaled_model(a).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1E-5))
else:
self.assertFalse(torch.allclose(a , a , atol=1E-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1E-5))
| 73 | 0 |
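A minimal sketch of the linear RoPE scaling exercised by the parameterized test above (a simplified rotary formulation, not the model's actual module): positions are divided by the scaling factor before the angles are computed, which is why linear-scaled outputs are expected to diverge from the unscaled model even on short inputs.
import torch
def rope_angles(positions, dim, base=10000.0, factor=1.0):
    # inverse frequencies for each pair of hidden channels
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    # linear scaling divides positions by `factor`, stretching the rotary period
    return torch.outer(positions.float() / factor, inv_freq)
positions = torch.arange(10)
unscaled = rope_angles(positions, dim=8)
scaled = rope_angles(positions, dim=8, factor=10.0)
print(torch.allclose(unscaled, scaled, atol=1e-5))  # False: angles shrink by a factor of 10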
'''simple docstring'''
import os
def UpperCamelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
with open(os.path.dirname(__magic_name__ ) + """/grid.txt""" ) as f:
snake_case__ : List[str] = [] # noqa: E741
for _ in range(20 ):
l.append([int(__magic_name__ ) for x in f.readline().split()] )
snake_case__ : Optional[int] = 0
# right
for i in range(20 ):
for j in range(17 ):
snake_case__ : Dict = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
snake_case__ : List[Any] = temp
# down
for i in range(17 ):
for j in range(20 ):
snake_case__ : int = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
snake_case__ : List[str] = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
snake_case__ : int = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
snake_case__ : int = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
snake_case__ : Optional[Any] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
snake_case__ : Any = temp
return maximum
if __name__ == "__main__":
print(solution())
| 38 |
from __future__ import annotations
a_ : str = []
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
for i in range(len(_UpperCAmelCase)):
if board[row][i] == 1:
return False
for i in range(len(_UpperCAmelCase)):
if board[i][column] == 1:
return False
for i, j in zip(range(_UpperCAmelCase , -1 , -1) , range(_UpperCAmelCase , -1 , -1)):
if board[i][j] == 1:
return False
for i, j in zip(range(_UpperCAmelCase , -1 , -1) , range(_UpperCAmelCase , len(_UpperCAmelCase))):
if board[i][j] == 1:
return False
return True
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
if row >= len(_UpperCAmelCase):
solution.append(_UpperCAmelCase)
printboard(_UpperCAmelCase)
print()
return True
for i in range(len(_UpperCAmelCase)):
if is_safe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = 1
solve(_UpperCAmelCase , row + 1)
SCREAMING_SNAKE_CASE = 0
return False
def lowerCamelCase__ (_UpperCAmelCase):
for i in range(len(_UpperCAmelCase)):
for j in range(len(_UpperCAmelCase)):
if board[i][j] == 1:
print('Q' , end=' ')
else:
print('.' , end=' ')
print()
# n=int(input("The no. of queens"))
a_ : Tuple = 8
a_ : int = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
| 73 | 0 |
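As a compact companion to the board-based backtracker above, a set-based sketch (illustrative only, not the script's own helpers) that counts placements by tracking attacked columns and diagonals:
def count_queens(n):
    def place(row, cols, diag1, diag2):
        if row == n:
            return 1
        total = 0
        for col in range(n):
            if col in cols or (row - col) in diag1 or (row + col) in diag2:
                continue  # square is attacked by an earlier queen
            total += place(row + 1, cols | {col}, diag1 | {row - col}, diag2 | {row + col})
        return total
    return place(0, set(), set(), set())
print(count_queens(8))  # 92 solutions on the classic 8x8 board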
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'''vocab_file''': '''sentencepiece.model'''}
lowerCAmelCase_ = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
}
lowerCAmelCase_ = {
'''google/rembert''': 2_56,
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Tuple=False , _UpperCamelCase : Dict=True , _UpperCamelCase : Dict=True , _UpperCamelCase : Tuple="[CLS]" , _UpperCamelCase : Dict="[SEP]" , _UpperCamelCase : Tuple="[UNK]" , _UpperCamelCase : List[str]="[SEP]" , _UpperCamelCase : Tuple="[PAD]" , _UpperCamelCase : Optional[Any]="[CLS]" , _UpperCamelCase : List[str]="[MASK]" , **_UpperCamelCase : List[Any] , ) ->Tuple:
super().__init__(
do_lower_case=_UpperCamelCase , remove_space=_UpperCamelCase , keep_accents=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , **_UpperCamelCase , )
snake_case_ = do_lower_case
snake_case_ = remove_space
snake_case_ = keep_accents
snake_case_ = vocab_file
snake_case_ = spm.SentencePieceProcessor()
self.sp_model.Load(_UpperCamelCase )
@property
def snake_case__( self : List[Any] ) ->Dict:
return len(self.sp_model )
def snake_case__( self : Tuple ) ->Tuple:
snake_case_ = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ) ->Optional[Any]:
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self : Any , _UpperCamelCase : List[Any] ) ->Optional[Any]:
snake_case_ = d
snake_case_ = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def snake_case__( self : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Any=False ) ->List[Any]:
snake_case_ = self.sp_model.EncodeAsPieces(_UpperCamelCase )
return pieces
def snake_case__( self : str , _UpperCamelCase : Optional[int] ) ->Tuple:
return self.sp_model.PieceToId(_UpperCamelCase )
def snake_case__( self : Tuple , _UpperCamelCase : int ) ->List[str]:
return self.sp_model.IdToPiece(_UpperCamelCase )
def snake_case__( self : Union[str, Any] , _UpperCamelCase : Dict ) ->Dict:
snake_case_ = self.sp_model.decode_pieces(_UpperCamelCase )
return out_string
def snake_case__( self : Any , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case__( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) ->List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
return [1] + ([0] * len(_UpperCamelCase )) + [1]
def snake_case__( self : List[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__( self : List[str] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
if not os.path.isdir(_UpperCamelCase ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(_UpperCamelCase ) )
return
snake_case_ = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ):
copyfile(self.vocab_file , _UpperCamelCase )
return (out_vocab_file,) | 39 |
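A worked example (with hypothetical token ids) of the [CLS]/[SEP] layout and segment ids that the special-token methods above produce for single and paired sequences:
cls_id, sep_id = 101, 102  # hypothetical ids for [CLS] and [SEP]
tokens_a = [7, 8, 9]
tokens_b = [4, 5]
single = [cls_id] + tokens_a + [sep_id]
pair = [cls_id] + tokens_a + [sep_id] + tokens_b + [sep_id]
token_type_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
print(pair)            # [101, 7, 8, 9, 102, 4, 5, 102]
print(token_type_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]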
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _snake_case ( A__ , A__ , unittest.TestCase ):
_lowercase : List[Any] = StableDiffusionDiffEditPipeline
_lowercase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
_lowercase : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
_lowercase : List[str] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowercase : List[str] = frozenset([] )
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , )
SCREAMING_SNAKE_CASE = DDIMInverseScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_zero=a , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
SCREAMING_SNAKE_CASE = CLIPTextModel(a)
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> List[Any]:
SCREAMING_SNAKE_CASE = floats_tensor((1, 16, 16) , rng=random.Random(a)).to(a)
SCREAMING_SNAKE_CASE = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a)).to(a)
if str(a).startswith('mps'):
SCREAMING_SNAKE_CASE = torch.manual_seed(a)
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
SCREAMING_SNAKE_CASE = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> List[Any]:
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(a)).to(a)
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1)[0]
SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(a)).convert('RGB')
if str(a).startswith('mps'):
SCREAMING_SNAKE_CASE = torch.manual_seed(a)
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
SCREAMING_SNAKE_CASE = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self , a , a=0) -> Optional[int]:
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(a)).to(a)
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1)[0]
SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(a)).convert('RGB')
if str(a).startswith('mps'):
SCREAMING_SNAKE_CASE = torch.manual_seed(a)
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
SCREAMING_SNAKE_CASE = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
if not hasattr(self.pipeline_class , '_optional_components'):
return
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(a , a , a)
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a)
SCREAMING_SNAKE_CASE = pipe(**a)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a)
SCREAMING_SNAKE_CASE = self.pipeline_class.from_pretrained(a)
pipe_loaded.to(a)
pipe_loaded.set_progress_bar_config(disable=a)
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(a , a) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a)
SCREAMING_SNAKE_CASE = pipe_loaded(**a)[0]
SCREAMING_SNAKE_CASE = np.abs(output - output_loaded).max()
self.assertLess(a , 1E-4)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = 'cpu'
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = self.get_dummy_mask_inputs(a)
SCREAMING_SNAKE_CASE = pipe.generate_mask(**a)
SCREAMING_SNAKE_CASE = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16))
SCREAMING_SNAKE_CASE = np.array([0] * 9)
SCREAMING_SNAKE_CASE = np.abs(mask_slice.flatten() - expected_slice).max()
self.assertLessEqual(a , 1E-3)
self.assertEqual(mask[0, -3, -4] , 0)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = 'cpu'
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(a)
SCREAMING_SNAKE_CASE = pipe.invert(**a).images
SCREAMING_SNAKE_CASE = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
SCREAMING_SNAKE_CASE = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(a , 1E-3)
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=5E-3)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = 'cpu'
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler(**a)
SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler(**a)
SCREAMING_SNAKE_CASE = self.pipeline_class(**a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(a)
SCREAMING_SNAKE_CASE = pipe.invert(**a).images
SCREAMING_SNAKE_CASE = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
SCREAMING_SNAKE_CASE = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(a , 1E-3)
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls) -> List[Any]:
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png')
SCREAMING_SNAKE_CASE = raw_image.convert('RGB').resize((768, 768))
SCREAMING_SNAKE_CASE = raw_image
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = torch.manual_seed(0)
SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=a , torch_dtype=torch.floataa)
SCREAMING_SNAKE_CASE = DDIMScheduler.from_config(pipe.scheduler.config)
SCREAMING_SNAKE_CASE = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = 'a bowl of fruit'
SCREAMING_SNAKE_CASE = 'a bowl of pears'
SCREAMING_SNAKE_CASE = pipe.generate_mask(
image=self.raw_image , source_prompt=a , target_prompt=a , generator=a , )
SCREAMING_SNAKE_CASE = pipe.invert(
prompt=a , image=self.raw_image , inpaint_strength=0.7 , generator=a).latents
SCREAMING_SNAKE_CASE = pipe(
prompt=a , mask_image=a , image_latents=a , generator=a , negative_prompt=a , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
SCREAMING_SNAKE_CASE = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png').resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = torch.manual_seed(0)
SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=a , torch_dtype=torch.floataa)
SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = 'a bowl of fruit'
SCREAMING_SNAKE_CASE = 'a bowl of pears'
SCREAMING_SNAKE_CASE = pipe.generate_mask(
image=self.raw_image , source_prompt=a , target_prompt=a , generator=a , )
SCREAMING_SNAKE_CASE = pipe.invert(
prompt=a , image=self.raw_image , inpaint_strength=0.7 , generator=a , num_inference_steps=25 , ).latents
SCREAMING_SNAKE_CASE = pipe(
prompt=a , mask_image=a , image_latents=a , generator=a , negative_prompt=a , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
SCREAMING_SNAKE_CASE = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png').resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
| 73 | 0 |
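The assertions above compare a small corner of the output against a recorded reference rather than the full tensor. A standalone sketch of that slice-based regression idiom (toy data, not pipeline outputs):
import numpy as np
image = np.random.RandomState(0).rand(2, 32, 32, 3).astype(np.float32)
image_slice = image[0, -1, -3:, -3:]  # a small corner of the output, 9 values
expected_slice = image_slice.copy()   # stands in for a previously recorded reference
max_diff = np.abs(image_slice.flatten() - expected_slice.flatten()).max()
assert max_diff <= 1e-3  # same tolerance as the assertions above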
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : Any = "nllb-moe"
UpperCAmelCase__ : Tuple = ["past_key_values"]
UpperCAmelCase__ : Any = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self, SCREAMING_SNAKE_CASE_=12_8112, SCREAMING_SNAKE_CASE_=1024, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=4096, SCREAMING_SNAKE_CASE_=16, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=4096, SCREAMING_SNAKE_CASE_=16, SCREAMING_SNAKE_CASE_=0.05, SCREAMING_SNAKE_CASE_=0.05, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_="relu", SCREAMING_SNAKE_CASE_=1024, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_="float32", SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=128, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=0.0_01, SCREAMING_SNAKE_CASE_=0.0_01, SCREAMING_SNAKE_CASE_="all", SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=1.0, SCREAMING_SNAKE_CASE_=0.2, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=0, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=False, **SCREAMING_SNAKE_CASE_, ) -> List[Any]:
UpperCamelCase : List[Any] = vocab_size
UpperCamelCase : Union[str, Any] = max_position_embeddings
UpperCamelCase : Dict = d_model
UpperCamelCase : List[str] = encoder_ffn_dim
UpperCamelCase : Any = encoder_layers
UpperCamelCase : List[Any] = encoder_attention_heads
UpperCamelCase : Optional[int] = decoder_ffn_dim
UpperCamelCase : Union[str, Any] = decoder_layers
UpperCamelCase : Union[str, Any] = decoder_attention_heads
UpperCamelCase : List[str] = dropout
UpperCamelCase : Dict = attention_dropout
UpperCamelCase : Optional[int] = activation_dropout
UpperCamelCase : Optional[int] = activation_function
UpperCamelCase : Any = init_std
UpperCamelCase : Any = encoder_layerdrop
UpperCamelCase : str = decoder_layerdrop
UpperCamelCase : Optional[Any] = use_cache
UpperCamelCase : List[str] = encoder_layers
UpperCamelCase : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCamelCase : List[str] = router_z_loss_coef
UpperCamelCase : Dict = router_aux_loss_coef
UpperCamelCase : int = decoder_sparse_step
UpperCamelCase : Dict = encoder_sparse_step
UpperCamelCase : Optional[Any] = num_experts
UpperCamelCase : List[str] = expert_capacity
UpperCamelCase : str = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
UpperCamelCase : Optional[int] = router_dtype
UpperCamelCase : Optional[Any] = router_ignore_padding_tokens
UpperCamelCase : int = batch_prioritized_routing
UpperCamelCase : Union[str, Any] = second_expert_policy
UpperCamelCase : List[Any] = normalize_router_prob_before_dropping
UpperCamelCase : List[str] = moe_eval_capacity_token_fraction
UpperCamelCase : List[str] = moe_token_dropout
UpperCamelCase : int = output_router_logits
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_, bos_token_id=SCREAMING_SNAKE_CASE_, eos_token_id=SCREAMING_SNAKE_CASE_, is_encoder_decoder=SCREAMING_SNAKE_CASE_, decoder_start_token_id=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
| 40 |
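A toy sketch of the routing concepts the config above parameterizes, using simplified top-1 routing with a hard expert_capacity (not the model's implementation): each token goes to its highest-scoring expert until that expert is full, and overflow tokens are dropped.
import numpy as np
rng = np.random.default_rng(0)
num_tokens, num_experts, capacity = 8, 4, 2
router_logits = rng.normal(size=(num_tokens, num_experts))
choice = router_logits.argmax(axis=-1)  # top-1 expert per token
load = np.zeros(num_experts, dtype=int)
assignment = []
for token, expert in enumerate(choice):
    if load[expert] < capacity:  # expert still has room
        load[expert] += 1
        assignment.append((token, int(expert)))
    else:  # overflow tokens are dropped (or could be routed to a second choice)
        assignment.append((token, None))
print(assignment)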
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : List[str] = logging.get_logger(__name__)
a_ : Any = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _snake_case ( A__ ):
_lowercase : Optional[int] = '''unispeech'''
def __init__( self , a=32 , a=768 , a=12 , a=12 , a=3072 , a="gelu" , a=0.1 , a=0.1 , a=0.1 , a=0.0 , a=0.0 , a=0.1 , a=0.1 , a=0.02 , a=1E-5 , a="group" , a="gelu" , a=(512, 512, 512, 512, 512, 512, 512) , a=(5, 2, 2, 2, 2, 2, 2) , a=(10, 3, 3, 3, 3, 2, 2) , a=False , a=128 , a=16 , a=False , a=True , a=0.05 , a=10 , a=2 , a=0.0 , a=10 , a=0 , a=320 , a=2 , a=0.1 , a=100 , a=256 , a=256 , a=0.1 , a="mean" , a=False , a=False , a=256 , a=80 , a=0 , a=1 , a=2 , a=0.5 , **a , ) -> Optional[int]:
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a)
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = feat_extract_norm
SCREAMING_SNAKE_CASE = feat_extract_activation
SCREAMING_SNAKE_CASE = list(a)
SCREAMING_SNAKE_CASE = list(a)
SCREAMING_SNAKE_CASE = list(a)
SCREAMING_SNAKE_CASE = conv_bias
SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE = len(self.conv_dim)
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = activation_dropout
SCREAMING_SNAKE_CASE = feat_proj_dropout
SCREAMING_SNAKE_CASE = final_dropout
SCREAMING_SNAKE_CASE = layerdrop
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_ctc_classes
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = do_stable_layer_norm
SCREAMING_SNAKE_CASE = use_weighted_layer_sum
SCREAMING_SNAKE_CASE = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE = apply_spec_augment
SCREAMING_SNAKE_CASE = mask_time_prob
SCREAMING_SNAKE_CASE = mask_time_length
SCREAMING_SNAKE_CASE = mask_time_min_masks
SCREAMING_SNAKE_CASE = mask_feature_prob
SCREAMING_SNAKE_CASE = mask_feature_length
SCREAMING_SNAKE_CASE = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE = num_codevectors_per_group
SCREAMING_SNAKE_CASE = num_codevector_groups
SCREAMING_SNAKE_CASE = contrastive_logits_temperature
SCREAMING_SNAKE_CASE = feat_quantizer_dropout
SCREAMING_SNAKE_CASE = num_negatives
SCREAMING_SNAKE_CASE = codevector_dim
SCREAMING_SNAKE_CASE = proj_codevector_dim
SCREAMING_SNAKE_CASE = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE = ctc_loss_reduction
SCREAMING_SNAKE_CASE = ctc_zero_infinity
# pretraining loss
SCREAMING_SNAKE_CASE = replace_prob
@property
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
return functools.reduce(operator.mul , self.conv_stride , 1)
| 73 | 0 |
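The conv_stride product returned by the property above (5*2*2*2*2*2*2 = 320) is the encoder's overall downsampling factor. Separately, a simplified sketch of the SpecAugment-style time masking that mask_time_prob and mask_time_length control (illustrative, not the library's masking code):
import numpy as np
def time_mask(features, mask_prob=0.05, mask_length=10, seed=0):
    rng = np.random.default_rng(seed)
    seq_len = features.shape[0]
    num_spans = int(mask_prob * seq_len / mask_length)  # expected masked fraction
    out = features.copy()
    for _ in range(num_spans):
        start = int(rng.integers(0, max(1, seq_len - mask_length)))
        out[start : start + mask_length] = 0.0  # zero a contiguous span of frames
    return out
masked = time_mask(np.ones((320, 80)))
print(int((masked == 0).any(axis=1).sum()), 'of 320 frames masked')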
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = F"{sampling_rate}"
__lowercase = '''1'''
__lowercase = '''f32le'''
__lowercase = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(A__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
__lowercase = ffmpeg_process.communicate(A__ )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
__lowercase = output_stream[0]
__lowercase = np.frombuffer(A__ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
def _A ( A__ , A__ , A__ = "f32le" , ):
"""simple docstring"""
__lowercase = F"{sampling_rate}"
__lowercase = '''1'''
if format_for_conversion == "s16le":
__lowercase = 2
elif format_for_conversion == "f32le":
__lowercase = 4
else:
raise ValueError(F"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
__lowercase = platform.system()
if system == "Linux":
__lowercase = '''alsa'''
__lowercase = '''default'''
elif system == "Darwin":
__lowercase = '''avfoundation'''
__lowercase = ''':0'''
elif system == "Windows":
__lowercase = '''dshow'''
__lowercase = '''default'''
__lowercase = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
__lowercase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
__lowercase = _ffmpeg_stream(A__ , A__ )
for item in iterator:
yield item
def _A ( A__ , A__ , A__ = None , A__ = None , A__ = "f32le" , ):
"""simple docstring"""
if stream_chunk_s is not None:
__lowercase = stream_chunk_s
else:
__lowercase = chunk_length_s
__lowercase = ffmpeg_microphone(A__ , A__ , format_for_conversion=A__ )
if format_for_conversion == "s16le":
__lowercase = np.intaa
__lowercase = 2
elif format_for_conversion == "f32le":
__lowercase = np.floataa
__lowercase = 4
else:
raise ValueError(F"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
if stride_length_s is None:
__lowercase = chunk_length_s / 6
__lowercase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(A__ , (int, float) ):
__lowercase = [stride_length_s, stride_length_s]
__lowercase = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
__lowercase = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
__lowercase = datetime.datetime.now()
__lowercase = datetime.timedelta(seconds=A__ )
for item in chunk_bytes_iter(A__ , A__ , stride=(stride_left, stride_right) , stream=A__ ):
# Put everything back in numpy scale
__lowercase = np.frombuffer(item['''raw'''] , dtype=A__ )
__lowercase = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
__lowercase = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def _A ( A__ , A__ , A__ , A__ = False ):
"""simple docstring"""
__lowercase = b''''''
__lowercase , __lowercase = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}" )
__lowercase = 0
for raw in iterator:
acc += raw
if stream and len(A__ ) < chunk_len:
__lowercase = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(A__ ) >= chunk_len:
# We are flushing the accumulator
__lowercase = (_stride_left, stride_right)
__lowercase = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
__lowercase = False
yield item
__lowercase = stride_left
__lowercase = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(A__ ) > stride_left:
__lowercase = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
__lowercase = False
yield item
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = 2**24 # 16 MiB
try:
with subprocess.Popen(A__ , stdout=subprocess.PIPE , bufsize=A__ ) as ffmpeg_process:
while True:
__lowercase = ffmpeg_process.stdout.read(A__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
| 41 |
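A standalone reimplementation of the overlapping-chunk idea behind chunk_bytes_iter above (illustrative only): each emitted chunk carries `stride` bytes of left and right context so downstream processing can discard edge effects.
def chunks_with_stride(data, chunk_len, stride):
    step = chunk_len - 2 * stride  # each chunk keeps `stride` bytes of context per side
    for start in range(0, max(1, len(data) - 2 * stride), step):
        yield data[start : start + chunk_len]
for chunk in chunks_with_stride(b'abcdefghij', chunk_len=6, stride=1):
    print(chunk)  # b'abcdef' then b'efghij', overlapping by two bytes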
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
a_ : Optional[Any] = re.compile(R'\b(a|an|the)\b', re.UNICODE)
a_ : List[str] = None
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.')
parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.')
parser.add_argument(
'--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).')
parser.add_argument(
'--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.')
parser.add_argument(
'--na-prob-thresh' , '-t' , type=_UpperCAmelCase , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , )
parser.add_argument(
'--out-image-dir' , '-p' , metavar='out_images' , default=_UpperCAmelCase , help='Save precision-recall curves to directory.')
parser.add_argument('--verbose' , '-v' , action='store_true')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
SCREAMING_SNAKE_CASE = bool(qa['answers']['text'])
return qid_to_has_ans
def lowerCamelCase__ (_UpperCAmelCase):
def remove_articles(_UpperCAmelCase):
return ARTICLES_REGEX.sub(' ' , _UpperCAmelCase)
def white_space_fix(_UpperCAmelCase):
return " ".join(text.split())
def remove_punc(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(_UpperCAmelCase):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_UpperCAmelCase))))
def lowerCamelCase__ (_UpperCAmelCase):
if not s:
return []
return normalize_answer(_UpperCAmelCase).split()
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
return int(normalize_answer(_UpperCAmelCase) == normalize_answer(_UpperCAmelCase))
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = get_tokens(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = get_tokens(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = collections.Counter(_UpperCAmelCase) & collections.Counter(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = sum(common.values())
if len(_UpperCAmelCase) == 0 or len(_UpperCAmelCase) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
SCREAMING_SNAKE_CASE = 1.0 * num_same / len(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = 1.0 * num_same / len(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = (2 * precision * recall) / (precision + recall)
return fa
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
SCREAMING_SNAKE_CASE = qa['id']
SCREAMING_SNAKE_CASE = [t for t in qa['answers']['text'] if normalize_answer(_UpperCAmelCase)]
if not gold_answers:
# For unanswerable questions, the only correct answer is the empty string
SCREAMING_SNAKE_CASE = ['']
if qid not in preds:
print(F'''Missing prediction for {qid}''')
continue
SCREAMING_SNAKE_CASE = preds[qid]
# Take max over all gold answers
SCREAMING_SNAKE_CASE = max(compute_exact(_UpperCAmelCase , _UpperCAmelCase) for a in gold_answers)
SCREAMING_SNAKE_CASE = max(compute_fa(_UpperCAmelCase , _UpperCAmelCase) for a in gold_answers)
return exact_scores, fa_scores
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = {}
for qid, s in scores.items():
SCREAMING_SNAKE_CASE = na_probs[qid] > na_prob_thresh
if pred_na:
SCREAMING_SNAKE_CASE = float(not qid_to_has_ans[qid])
else:
SCREAMING_SNAKE_CASE = s
return new_scores
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None):
if not qid_list:
SCREAMING_SNAKE_CASE = len(_UpperCAmelCase)
return collections.OrderedDict(
[
('exact', 1_00.0 * sum(exact_scores.values()) / total),
('f1', 1_00.0 * sum(fa_scores.values()) / total),
('total', total),
])
else:
SCREAMING_SNAKE_CASE = len(_UpperCAmelCase)
return collections.OrderedDict(
[
('exact', 1_00.0 * sum(exact_scores[k] for k in qid_list) / total),
('f1', 1_00.0 * sum(fa_scores[k] for k in qid_list) / total),
('total', total),
])
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
for k in new_eval:
SCREAMING_SNAKE_CASE = new_eval[k]
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
plt.step(_UpperCAmelCase , _UpperCAmelCase , color='b' , alpha=0.2 , where='post')
plt.fill_between(_UpperCAmelCase , _UpperCAmelCase , step='post' , alpha=0.2 , color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.title(_UpperCAmelCase)
plt.savefig(_UpperCAmelCase)
plt.clf()
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None):
SCREAMING_SNAKE_CASE = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase: na_probs[k])
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1.0
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = [1.0]
SCREAMING_SNAKE_CASE = [0.0]
SCREAMING_SNAKE_CASE = 0.0
for i, qid in enumerate(_UpperCAmelCase):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
SCREAMING_SNAKE_CASE = true_pos / float(i + 1)
SCREAMING_SNAKE_CASE = true_pos / float(_UpperCAmelCase)
if i == len(_UpperCAmelCase) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(_UpperCAmelCase)
recalls.append(_UpperCAmelCase)
if out_image:
plot_pr_curve(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
return {"ap": 1_00.0 * avg_prec}
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
if out_image_dir and not os.path.exists(_UpperCAmelCase):
os.makedirs(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = sum(1 for v in qid_to_has_ans.values() if v)
if num_true_pos == 0:
return
SCREAMING_SNAKE_CASE = make_precision_recall_eval(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_exact.png') , title='Precision-Recall curve for Exact Match score' , )
SCREAMING_SNAKE_CASE = make_precision_recall_eval(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_f1.png') , title='Precision-Recall curve for F1 score' , )
SCREAMING_SNAKE_CASE = {k: float(_UpperCAmelCase) for k, v in qid_to_has_ans.items()}
SCREAMING_SNAKE_CASE = make_precision_recall_eval(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_oracle.png') , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_exact')
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_f1')
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_oracle')
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
if not qid_list:
return
SCREAMING_SNAKE_CASE = [na_probs[k] for k in qid_list]
SCREAMING_SNAKE_CASE = np.ones_like(_UpperCAmelCase) / float(len(_UpperCAmelCase))
plt.hist(_UpperCAmelCase , weights=_UpperCAmelCase , bins=20 , range=(0.0, 1.0))
plt.xlabel('Model probability of no-answer')
plt.ylabel('Proportion of dataset')
plt.title(F'''Histogram of no-answer probability: {name}''')
plt.savefig(os.path.join(_UpperCAmelCase , F'''na_prob_hist_{name}.png'''))
plt.clf()
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
SCREAMING_SNAKE_CASE = num_no_ans
SCREAMING_SNAKE_CASE = cur_score
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase: na_probs[k])
for i, qid in enumerate(_UpperCAmelCase):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
SCREAMING_SNAKE_CASE = scores[qid]
else:
if preds[qid]:
SCREAMING_SNAKE_CASE = -1
else:
SCREAMING_SNAKE_CASE = 0
cur_score += diff
if cur_score > best_score:
SCREAMING_SNAKE_CASE = cur_score
SCREAMING_SNAKE_CASE = na_probs[qid]
return 1_00.0 * best_score / len(_UpperCAmelCase), best_thresh
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = find_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = find_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = best_exact
SCREAMING_SNAKE_CASE = exact_thresh
SCREAMING_SNAKE_CASE = best_fa
SCREAMING_SNAKE_CASE = fa_thresh
def lowerCamelCase__ ():
with open(OPTS.data_file) as f:
SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = dataset_json['data']
with open(OPTS.pred_file) as f:
SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase)
if OPTS.na_prob_file:
with open(OPTS.na_prob_file) as f:
SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase)
else:
SCREAMING_SNAKE_CASE = {k: 0.0 for k in preds}
SCREAMING_SNAKE_CASE = make_qid_to_has_ans(_UpperCAmelCase) # maps qid to True/False
SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if v]
SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if not v]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_raw_scores(_UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = apply_no_ans_threshold(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.na_prob_thresh)
SCREAMING_SNAKE_CASE = apply_no_ans_threshold(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.na_prob_thresh)
SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase)
if has_ans_qids:
SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase , qid_list=_UpperCAmelCase)
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'HasAns')
if no_ans_qids:
SCREAMING_SNAKE_CASE = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase , qid_list=_UpperCAmelCase)
merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'NoAns')
if OPTS.na_prob_file:
find_all_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir)
histogram_na_prob(_UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir , 'hasAns')
histogram_na_prob(_UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir , 'noAns')
if OPTS.out_file:
with open(OPTS.out_file , 'w') as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase)
else:
print(json.dumps(_UpperCAmelCase , indent=2))
if __name__ == "__main__":
a_ : Any = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
| 73 | 0 |
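A tiny worked example of the token-overlap F1 computed above (standalone, mirroring the same precision/recall arithmetic):
from collections import Counter
gold_toks = "the cat sat".split()
pred_toks = "a cat sat down".split()
common = Counter(gold_toks) & Counter(pred_toks)
num_same = sum(common.values())        # 2 ("cat" and "sat")
precision = num_same / len(pred_toks)  # 2/4
recall = num_same / len(gold_toks)     # 2/3
f1 = (2 * precision * recall) / (precision + recall)
print(round(f1, 4))  # 0.5714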
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
A_ = 500_000
A_ , A_ = os.path.split(__file__)
A_ = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def _UpperCamelCase ( __UpperCamelCase ,**__UpperCamelCase ) -> int:
lowerCamelCase_ = dataset.map(**__UpperCamelCase )
@get_duration
def _UpperCamelCase ( __UpperCamelCase ,**__UpperCamelCase ) -> Any:
lowerCamelCase_ = dataset.filter(**__UpperCamelCase )
def _UpperCamelCase ( ) -> List[str]:
lowerCamelCase_ = {'num examples': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase_ = datasets.Features({'text': datasets.Value('string' ), 'numbers': datasets.Value('float32' )} )
lowerCamelCase_ = generate_example_dataset(
os.path.join(__UpperCamelCase ,'dataset.arrow' ) ,__UpperCamelCase ,num_examples=__UpperCamelCase )
lowerCamelCase_ = transformers.AutoTokenizer.from_pretrained('bert-base-cased' ,use_fast=__UpperCamelCase )
def tokenize(__UpperCamelCase ):
return tokenizer(examples['text'] )
lowerCamelCase_ = map(__UpperCamelCase )
lowerCamelCase_ = map(__UpperCamelCase ,batched=__UpperCamelCase )
lowerCamelCase_ = map(__UpperCamelCase ,function=lambda __UpperCamelCase : None ,batched=__UpperCamelCase )
with dataset.formatted_as(type='numpy' ):
lowerCamelCase_ = map(__UpperCamelCase ,function=lambda __UpperCamelCase : None ,batched=__UpperCamelCase )
with dataset.formatted_as(type='pandas' ):
lowerCamelCase_ = map(__UpperCamelCase ,function=lambda __UpperCamelCase : None ,batched=__UpperCamelCase )
with dataset.formatted_as(type='torch' ,columns='numbers' ):
lowerCamelCase_ = map(__UpperCamelCase ,function=lambda __UpperCamelCase : None ,batched=__UpperCamelCase )
with dataset.formatted_as(type='tensorflow' ,columns='numbers' ):
lowerCamelCase_ = map(__UpperCamelCase ,function=lambda __UpperCamelCase : None ,batched=__UpperCamelCase )
lowerCamelCase_ = map(__UpperCamelCase ,function=__UpperCamelCase ,batched=__UpperCamelCase )
lowerCamelCase_ = filter(__UpperCamelCase )
# Activate later when tokenizers support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(__UpperCamelCase ,'wb' ) as f:
f.write(json.dumps(__UpperCamelCase ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 42 |
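One plausible shape of the get_duration helper imported above (an assumption; the real implementation lives in utils): a decorator that runs the function and returns its wall-clock duration.
import functools
import time
def get_duration(func):  # hypothetical shape, for illustration only
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start
    return wrapper
@get_duration
def busy_work(n):
    sum(range(n))
print(f"{busy_work(100_000):.6f}s")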
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
a_ : Dict = logging.get_logger(__name__)
class _snake_case ( A__ ):
def __init__( self , *a , **a) -> None:
warnings.warn(
'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use GLPNImageProcessor instead.' , a , )
super().__init__(*a , **a)
| 73 | 0 |
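The class above is the standard deprecation shim: subclass the replacement and warn on construction. A generic, self-contained sketch of the pattern with hypothetical class names:
import warnings
class NewImageProcessor:  # hypothetical replacement class
    def __init__(self, size=224):
        self.size = size
class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'OldFeatureExtractor is deprecated; use NewImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
extractor = OldFeatureExtractor(size=256)  # still functional, but emits a warning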
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
lowercase__ = str(bin(SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b"
lowercase__ = str(bin(SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b"
lowercase__ = max(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE ) , b_binary.zfill(SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 |
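A quick inline check of the helper above with a = 25 and b = 32 (0b011001 XOR 0b100000 = 0b111001, i.e. 57):
a, b = 25, 32
a_bin, b_bin = bin(a)[2:], bin(b)[2:]
width = max(len(a_bin), len(b_bin))
xor = "0b" + "".join(str(int(x != y)) for x, y in zip(a_bin.zfill(width), b_bin.zfill(width)))
print(xor)  # 0b111001, which equals bin(25 ^ 32)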
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _snake_case ( unittest.TestCase , A__ ):
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = load_tool('text-classification')
self.tool.setup()
SCREAMING_SNAKE_CASE = load_tool('text-classification' , remote=a)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = self.tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(a , 'positive')
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.remote_tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(a , 'positive')
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(a , 'positive')
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(a , 'positive')
| 73 | 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
UpperCAmelCase_ : Optional[Any] = '\\n\n'
UpperCAmelCase_ : Optional[int] = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
UpperCAmelCase_ : List[Any] = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
def lowerCamelCase_ ( self : Optional[int] ):
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ),reference_urls=["https://huggingface.co/docs/transformers/perplexity"],)
def lowerCamelCase_ ( self : List[str],__A : Dict,__A : List[Any],__A : int = 1_6,__A : bool = True,__A : int=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
_lowerCamelCase : int = "cuda"
else:
_lowerCamelCase : int = "cuda" if torch.cuda.is_available() else "cpu"
_lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained(__A )
_lowerCamelCase : Tuple = model.to(__A )
_lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(__A )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
_lowerCamelCase : Tuple = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(__A ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
_lowerCamelCase : int = model.config.max_length - 1
else:
_lowerCamelCase : Tuple = model.config.max_length
_lowerCamelCase : int = tokenizer(
__A,add_special_tokens=__A,padding=__A,truncation=__A,max_length=__A,return_tensors="pt",return_attention_mask=__A,).to(__A )
_lowerCamelCase : Optional[Any] = encodings["input_ids"]
_lowerCamelCase : Dict = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ),1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ),2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
_lowerCamelCase : str = []
_lowerCamelCase : str = CrossEntropyLoss(reduction="none" )
for start_index in logging.tqdm(range(0,len(__A ),__A ) ):
_lowerCamelCase : int = min(start_index + batch_size,len(__A ) )
_lowerCamelCase : int = encoded_texts[start_index:end_index]
_lowerCamelCase : Optional[Any] = attn_masks[start_index:end_index]
if add_start_token:
_lowerCamelCase : Dict = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__A )
_lowerCamelCase : Optional[int] = torch.cat([bos_tokens_tensor, encoded_batch],dim=1 )
_lowerCamelCase : List[Any] = torch.cat(
[torch.ones(bos_tokens_tensor.size(),dtype=torch.intaa ).to(__A ), attn_mask],dim=1 )
_lowerCamelCase : Dict = encoded_batch
with torch.no_grad():
_lowerCamelCase : str = model(__A,attention_mask=__A ).logits
_lowerCamelCase : List[str] = out_logits[..., :-1, :].contiguous()
_lowerCamelCase : Optional[int] = labels[..., 1:].contiguous()
_lowerCamelCase : Dict = attn_mask[..., 1:].contiguous()
_lowerCamelCase : str = torch.expa(
(loss_fct(shift_logits.transpose(1,2 ),__A ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(__A )} | 44 |
import sys
import turtle
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def triangle(vertexa , vertexb , vertexc , depth , ):
    # draw the outline of the current triangle
    my_pen.up()
    my_pen.goto(vertexa[0] , vertexa[1])
    my_pen.down()
    my_pen.goto(vertexb[0] , vertexb[1])
    my_pen.goto(vertexc[0] , vertexc[1])
    my_pen.goto(vertexa[0] , vertexa[1])
    if depth == 0:
        return
    # recurse into the three corner sub-triangles (Sierpinski)
    triangle(vertexa , get_mid(vertexa , vertexb) , get_mid(vertexa , vertexc) , depth - 1)
    triangle(vertexb , get_mid(vertexa , vertexb) , get_mid(vertexb , vertexc) , depth - 1)
    triangle(vertexc , get_mid(vertexc , vertexb) , get_mid(vertexa , vertexc) , depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 73 | 0 |
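A minimal, self-contained sketch of the batched perplexity computation that the metric code above implements, assuming any Hugging Face causal LM; "gpt2" is only an illustrative checkpoint and the helper name is ours:

import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer

def perplexity_of(texts, model_name="gpt2", device="cpu"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
    loss_fct = CrossEntropyLoss(reduction="none")
    ppls = []
    for text in texts:
        enc = tokenizer(text, return_tensors="pt").to(device)
        input_ids = enc["input_ids"]
        with torch.no_grad():
            logits = model(**enc).logits
        # shift so that token t is predicted from tokens < t
        shift_logits = logits[..., :-1, :].contiguous()
        shift_labels = input_ids[..., 1:].contiguous()
        nll = loss_fct(shift_logits.transpose(1, 2), shift_labels).mean(1)
        ppls.append(torch.exp(nll).item())  # perplexity = exp(mean NLL)
    return ppls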
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer
MT5Tokenizer = T5Tokenizer
if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast
MT5TokenizerFast = T5TokenizerFast
UpperCamelCase = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
) | 45 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
a_ : Any = 'true'
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=16):
set_seed(42)
SCREAMING_SNAKE_CASE = RegressionModel()
SCREAMING_SNAKE_CASE = deepcopy(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = RegressionDataset(length=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase)
model.to(accelerator.device)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase)
return model, ddp_model, dataloader
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=False):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased')
SCREAMING_SNAKE_CASE = load_dataset('glue' , 'mrpc' , split='validation')
def tokenize_function(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase)
return outputs
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE = dataset.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column('label' , 'labels')
def collate_fn(_UpperCAmelCase):
if use_longest:
return tokenizer.pad(_UpperCAmelCase , padding='longest' , return_tensors='pt')
return tokenizer.pad(_UpperCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt')
return DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=16)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = Accelerator(dispatch_batches=_UpperCAmelCase , split_batches=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = get_dataloader(_UpperCAmelCase , not dispatch_batches)
SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase)
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = []
for batch in dataloader:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = batch.values()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((logit, target))
logits_and_targets.append((logit, target))
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = [], []
for logit, targ in logits_and_targets:
logits.append(_UpperCAmelCase)
targs.append(_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torch.cat(_UpperCAmelCase), torch.cat(_UpperCAmelCase)
return logits, targs
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=16):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_basic_setup(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = generate_predictions(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
assert (
len(_UpperCAmelCase) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_UpperCAmelCase)}'''
def lowerCamelCase__ (_UpperCAmelCase = False , _UpperCAmelCase = False):
SCREAMING_SNAKE_CASE = evaluate.load('glue' , 'mrpc')
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_mrpc_setup(_UpperCAmelCase , _UpperCAmelCase)
# First do baseline
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['no']
model.to(_UpperCAmelCase)
model.eval()
for batch in dataloader:
batch.to(_UpperCAmelCase)
with torch.inference_mode():
SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1)
metric.add_batch(predictions=_UpperCAmelCase , references=batch['labels'])
SCREAMING_SNAKE_CASE = metric.compute()
# Then do distributed
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase)
SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1)
SCREAMING_SNAKE_CASE = batch['labels']
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((preds, references))
metric.add_batch(predictions=_UpperCAmelCase , references=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key]), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**')
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''')
test_mrpc(_UpperCAmelCase , _UpperCAmelCase)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**')
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
SCREAMING_SNAKE_CASE = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase)
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''')
test_torch_metrics(_UpperCAmelCase , 99)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**')
SCREAMING_SNAKE_CASE = Accelerator()
test_torch_metrics(_UpperCAmelCase , 512)
accelerator.state._reset_state()
def lowerCamelCase__ (_UpperCAmelCase):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 73 | 0 |
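The Accelerate test above revolves around gather_for_metrics; here is a minimal sketch of the pattern it exercises, assuming an evaluate-style metric object (the function name is ours):

import torch
from accelerate import Accelerator

def distributed_eval(model, dataloader, metric):
    accelerator = Accelerator()
    model, dataloader = accelerator.prepare(model, dataloader)
    model.eval()
    for batch in dataloader:
        with torch.no_grad():
            logits = model(**batch).logits
        preds = logits.argmax(dim=-1)
        # unlike plain gather, gather_for_metrics drops the duplicate samples
        # that data-parallel sharding pads onto the last batch
        preds, refs = accelerator.gather_for_metrics((preds, batch["labels"]))
        metric.add_batch(predictions=preds, references=refs)
    return metric.compute()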
"""simple docstring"""
def jaro_winkler( str1 , str2 ) -> float:
    '''simple docstring'''
    def get_matched_characters(_str1 , _str2 ) -> str:
        matched = []
        limit = min(len(_str1 ) , len(_str2 ) ) // 2
        for i, l in enumerate(_str1 ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_str2 ) ) )
            if l in _str2[left:right]:
                matched.append(l )
                # blank out the matched character so it cannot match twice
                _str2 = f"""{_str2[0:_str2.index(l )]} {_str2[_str2.index(l ) + 1:]}"""
        return "".join(matched )
    # matching characters
    matching_1 = get_matched_characters(str1 , str2 )
    matching_2 = get_matched_characters(str2 , str1 )
    match_count = len(matching_1 )
    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1 , matching_2 ) if c1 != c2] ) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1 )
                + match_count / len(str2 )
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4] , str2[:4] ):
        if c1 == c2:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world''')) | 46 |
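A quick worked check of jaro_winkler above; the values follow from the definition jaro = (m/|s1| + m/|s2| + (m - t)/m) / 3, boosted by up to four common prefix characters:

assert abs(jaro_winkler("hello", "hello") - 1.0) < 1e-9          # m=5, t=0; prefix boost is zero at jaro=1
assert abs(jaro_winkler("hello", "world") - 0.4666666666666666) < 1e-9  # only "l" matches, no common prefix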
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 73 | 0 |
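A minimal stdlib-only sketch of the lazy-import idea that the __init__ above delegates to _LazyModule, using PEP 562's module-level __getattr__ (illustrative, not the transformers implementation):

# inside a package's __init__.py
import importlib

_import_structure = {"tokenization_gpt_sw3": ["GPTSw3Tokenizer"]}

def __getattr__(name):
    # resolve the attribute on first access, importing only the needed submodule
    for submodule, attrs in _import_structure.items():
        if name in attrs:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(name)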
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class _UpperCamelCase( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE : str = FLAX_MODEL_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModel)
class _UpperCamelCase( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE : Tuple = FLAX_MODEL_FOR_PRETRAINING_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')
class _UpperCamelCase( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE : Tuple = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')
class _UpperCamelCase( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE : Any = FLAX_MODEL_FOR_MASKED_LM_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')
class _UpperCamelCase( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE : List[str] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)
class _UpperCamelCase( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE : int = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)
class _UpperCamelCase( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE : int = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')
class _UpperCamelCase( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE : Any = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)
class _UpperCamelCase( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE : Any = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')
class _UpperCamelCase( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE : Any = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)
class _UpperCamelCase( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE : str = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='''image classification'''
)
class _UpperCamelCase( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE : List[str] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='''vision-to-text modeling''')
class _UpperCamelCase( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE : List[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE__ = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
| 47 |
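All of the mappings above store class names as strings; a sketch of the lazy resolution idea behind _LazyAutoMapping (illustrative registry, not the real implementation):

import importlib

MODEL_MAPPING_SKETCH = {"bert": ("transformers", "FlaxBertModel")}

def resolve_model_class(model_type):
    # the import happens only when a model_type is actually looked up
    module_name, class_name = MODEL_MAPPING_SKETCH[model_type]
    return getattr(importlib.import_module(module_name), class_name)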
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
a_ : str = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
def lowerCamelCase__ (_UpperCAmelCase=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=A__ ) )
class _snake_case ( A__ ):
_lowercase : Optional[Any] = None
_lowercase : Optional[Any] = None
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Optional[Any]:
with TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE = dataset_module_factory(a , cache_dir=a)
SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , dataset=a)
SCREAMING_SNAKE_CASE = builder_cls(
cache_dir=a , config_name=a , hash=dataset_module.hash , )
SCREAMING_SNAKE_CASE = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=a).replace(os.sep , '/'),
config.DATASET_INFO_FILENAME,
])
SCREAMING_SNAKE_CASE = cached_path(a , cache_dir=a)
self.assertTrue(os.path.exists(a))
@pytest.mark.integration
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('test_hf_gcp') / 'test_wikipedia_simple'
SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path)
SCREAMING_SNAKE_CASE = builder_cls(
cache_dir=_UpperCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
SCREAMING_SNAKE_CASE = None
builder_instance.download_and_prepare()
SCREAMING_SNAKE_CASE = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , dataset=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = builder_cls(
cache_dir=_UpperCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
SCREAMING_SNAKE_CASE = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_UpperCAmelCase , _UpperCAmelCase)
assert "train" in ds
assert isinstance(ds['train'] , _UpperCAmelCase)
assert next(iter(ds['train']))
| 73 | 0 |
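An illustrative streaming load mirroring the test above; streaming iterates over the remote prepared files instead of downloading and preparing the whole dataset first:

from datasets import load_dataset

ds = load_dataset("wikipedia", "20220301.frr", split="train", streaming=True)
print(next(iter(ds)))  # first article as a plain dict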
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Optional[int] = ['image_processor', 'tokenizer']
snake_case__ :List[str] = 'BlipImageProcessor'
snake_case__ :Any = 'AutoTokenizer'
def __init__( self : Any , __magic_name__ : str , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = False
super().__init__(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = self.image_processor
def __call__( self : Any , __magic_name__ : ImageInput = None , __magic_name__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __magic_name__ : bool = True , __magic_name__ : Union[bool, str, PaddingStrategy] = False , __magic_name__ : Union[bool, str, TruncationStrategy] = None , __magic_name__ : Optional[int] = None , __magic_name__ : int = 0 , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = True , __magic_name__ : Optional[Union[str, TensorType]] = None , **__magic_name__ : str , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
lowerCAmelCase__ = self.tokenizer
lowerCAmelCase__ = self.tokenizer(
text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_token_type_ids=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
return text_encoding
# add pixel_values
lowerCAmelCase__ = self.image_processor(__magic_name__ , return_tensors=__magic_name__ )
if text is not None:
lowerCAmelCase__ = self.tokenizer(
text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_token_type_ids=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
else:
lowerCAmelCase__ = None
if text_encoding is not None:
encoding_image_processor.update(__magic_name__ )
return encoding_image_processor
def __SCREAMING_SNAKE_CASE ( self : List[str] , *__magic_name__ : Optional[int] , **__magic_name__ : Optional[int] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict , *__magic_name__ : Any , **__magic_name__ : List[Any] ):
"""simple docstring"""
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.tokenizer.model_input_names
lowerCAmelCase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 48 |
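Hypothetical usage of a processor like the one above: the image processor produces pixel_values, the tokenizer produces input_ids/attention_mask, and the two encodings are merged. The checkpoint name and file path are only illustrative:

from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")
print(inputs.keys())  # pixel_values plus input_ids/attention_mask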
from __future__ import annotations
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(_UpperCAmelCase)
if n > 1:
factors.append(_UpperCAmelCase)
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Optional[Any] = "bert-generation"
def __init__( self : Any , _lowercase : List[Any]=5_03_58 , _lowercase : str=10_24 , _lowercase : str=24 , _lowercase : int=16 , _lowercase : Any=40_96 , _lowercase : Union[str, Any]="gelu" , _lowercase : str=0.1 , _lowercase : Union[str, Any]=0.1 , _lowercase : Optional[Any]=5_12 , _lowercase : str=0.02 , _lowercase : Dict=1E-12 , _lowercase : int=0 , _lowercase : Optional[int]=2 , _lowercase : Dict=1 , _lowercase : Dict="absolute" , _lowercase : Tuple=True , **_lowercase : str , ):
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
| 49 |
import math
import os
import sys
def read_file_binary(file_path):
    # read the whole file and render it as a string of bits
    result = ''
    try:
        with open(file_path , 'rb') as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f'{dat:08b}'
            result += curr_byte
        return result
    except OSError:
        print('File not accessible')
        sys.exit()
def add_key_to_lexicon(lexicon , curr_string , index , last_match_id):
    # grow the LZ lexicon: replace the matched key with its two extensions
    lexicon.pop(curr_string)
    lexicon[curr_string + '0'] = last_match_id
    if math.log2(index).is_integer():
        # index now needs one more bit, so left-pad every stored code with '0'
        for curr_key in lexicon:
            lexicon[curr_key] = '0' + lexicon[curr_key]
    lexicon[curr_string + '1'] = bin(index)[2:]
def compress_data(data_bits):
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon , curr_string , index , last_match_id)
        index += 1
        curr_string = ''
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path , compressed):
    # prepend the original file length (unary-prefixed binary) for the decoder
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path , to_write):
    byte_length = 8
    try:
        with open(file_path , 'wb') as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write) , byte_length)
            ]
            # stop-bit padding: append '1', then '0's up to a byte boundary
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('10000000')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem , 2).to_bytes(1 , byteorder='big'))
    except OSError:
        print('File not accessible')
        sys.exit()
def compress(source_path , destination_path):
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path , compressed)
    write_file_binary(destination_path , compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 73 | 0 |
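The trickiest part of write_file_binary above is the stop-bit padding; a simplified sketch of the same idea with a round-trip check (the helper names are ours):

def pad_to_bytes(bits: str, byte_length: int = 8) -> str:
    bits += "1"                          # stop bit marks where the payload ends
    return bits + "0" * (-len(bits) % byte_length)

def strip_padding(bits: str) -> str:
    return bits[: bits.rindex("1")]      # drop everything from the last "1" on

assert strip_padding(pad_to_bytes("10110")) == "10110"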
'''simple docstring'''
def A__ ( __lowerCAmelCase : int ):
lowerCamelCase__ = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def A__ ( __lowerCAmelCase : int = 5000 ):
lowerCamelCase__ = [(i * (3 * i - 1)) // 2 for i in range(1 , __lowerCAmelCase )]
for i, pentagonal_i in enumerate(__lowerCAmelCase ):
for j in range(__lowerCAmelCase , len(__lowerCAmelCase ) ):
lowerCamelCase__ = pentagonal_nums[j]
lowerCamelCase__ = pentagonal_i + pentagonal_j
lowerCamelCase__ = pentagonal_j - pentagonal_i
if is_pentagonal(__lowerCAmelCase ) and is_pentagonal(__lowerCAmelCase ):
return b
return -1
if __name__ == "__main__":
print(F'{solution() = }')
| 50 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def lowerCamelCase__ (_UpperCAmelCase):
return 1.0 / (1.0 + np.exp(-_outputs))
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = np.max(_outputs , axis=-1 , keepdims=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = np.exp(_outputs - maxes)
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_UpperCAmelCase)
class _snake_case ( A__ ):
_lowercase : Tuple = '''sigmoid'''
_lowercase : List[str] = '''softmax'''
_lowercase : Tuple = '''none'''
@add_end_docstrings(
A__ , R'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class _snake_case ( A__ ):
_lowercase : Optional[Any] = False
_lowercase : Tuple = ClassificationFunction.NONE
def __init__( self , **a) -> Optional[Any]:
super().__init__(**a)
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)
def SCREAMING_SNAKE_CASE__ ( self , a=None , a=None , a="" , **a) -> Tuple:
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
SCREAMING_SNAKE_CASE = tokenizer_kwargs
SCREAMING_SNAKE_CASE = {}
if hasattr(self.model.config , 'return_all_scores') and return_all_scores is None:
SCREAMING_SNAKE_CASE = self.model.config.return_all_scores
if isinstance(a , a) or top_k is None:
SCREAMING_SNAKE_CASE = top_k
SCREAMING_SNAKE_CASE = False
elif return_all_scores is not None:
warnings.warn(
'`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'
' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , a , )
if return_all_scores:
SCREAMING_SNAKE_CASE = None
else:
SCREAMING_SNAKE_CASE = 1
if isinstance(a , a):
SCREAMING_SNAKE_CASE = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
SCREAMING_SNAKE_CASE = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *a , **a) -> Optional[int]:
SCREAMING_SNAKE_CASE = super().__call__(*a , **a)
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
SCREAMING_SNAKE_CASE = 'top_k' not in kwargs
if isinstance(args[0] , a) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def SCREAMING_SNAKE_CASE__ ( self , a , **a) -> Dict[str, GenericTensor]:
SCREAMING_SNAKE_CASE = self.framework
if isinstance(a , a):
return self.tokenizer(**a , return_tensors=a , **a)
elif isinstance(a , a) and len(a) == 1 and isinstance(inputs[0] , a) and len(inputs[0]) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=a , **a)
elif isinstance(a , a):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.')
return self.tokenizer(a , return_tensors=a , **a)
def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[Any]:
return self.model(**a)
def SCREAMING_SNAKE_CASE__ ( self , a , a=None , a=1 , a=True) -> Any:
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
SCREAMING_SNAKE_CASE = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
SCREAMING_SNAKE_CASE = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , 'function_to_apply') and function_to_apply is None:
SCREAMING_SNAKE_CASE = self.model.config.function_to_apply
else:
SCREAMING_SNAKE_CASE = ClassificationFunction.NONE
SCREAMING_SNAKE_CASE = model_outputs['logits'][0]
SCREAMING_SNAKE_CASE = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
SCREAMING_SNAKE_CASE = sigmoid(a)
elif function_to_apply == ClassificationFunction.SOFTMAX:
SCREAMING_SNAKE_CASE = softmax(a)
elif function_to_apply == ClassificationFunction.NONE:
SCREAMING_SNAKE_CASE = outputs
else:
raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''')
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
SCREAMING_SNAKE_CASE = [
{'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(a)
]
if not _legacy:
            dict_scores.sort(key=lambda x: x["score"] , reverse=True)
if top_k is not None:
SCREAMING_SNAKE_CASE = dict_scores[:top_k]
return dict_scores
| 73 | 0 |
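Illustrative use of the pipeline behaviour described above: top_k=None returns scores for every label and is the modern replacement for return_all_scores=True.

from transformers import pipeline

clf = pipeline("text-classification")  # downloads a default sentiment model
print(clf("I love this movie", top_k=None))
# e.g. [{'label': 'POSITIVE', 'score': 0.99...}, {'label': 'NEGATIVE', 'score': 0.00...}]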
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : Dict ):
UpperCAmelCase = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(a__ ) )
def __snake_case ( self : Union[str, Any] ):
UpperCAmelCase = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(a__ ) )
def __snake_case ( self : Union[str, Any] ):
UpperCAmelCase = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(a__ ) )
def __snake_case ( self : Union[str, Any] ):
UpperCAmelCase = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(a__ ) )
def __snake_case ( self : Dict ):
UpperCAmelCase = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(a__ ) )
def __snake_case ( self : Tuple ):
UpperCAmelCase = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(a__ , variant=a__ ) )
def __snake_case ( self : Tuple ):
UpperCAmelCase = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(a__ , variant=a__ ) )
def __snake_case ( self : Any ):
# pass variant but use the non-variant filenames
UpperCAmelCase = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(a__ , variant=a__ ) )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
UpperCAmelCase = '''fp16'''
self.assertFalse(is_safetensors_compatible(a__ , variant=a__ ) )
def __snake_case ( self : List[str] ):
UpperCAmelCase = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(a__ , variant=a__ ) )
def __snake_case ( self : str ):
# pass variant but use the non-variant filenames
UpperCAmelCase = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(a__ , variant=a__ ) )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertFalse(is_safetensors_compatible(a__ , variant=a__ ) )
| 51 |
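A sketch of the compatibility rule the tests above encode, ignoring the variant handling: every component that ships a PyTorch .bin weight must also ship the matching .safetensors file (illustrative, not the diffusers implementation):

def safetensors_compatible(filenames):
    safetensors = {f for f in filenames if f.endswith(".safetensors")}
    for name in (f for f in filenames if f.endswith(".bin")):
        if "diffusion_pytorch_model" in name:
            expected = name.replace(".bin", ".safetensors")
        else:
            expected = name.replace("pytorch_model", "model").replace(".bin", ".safetensors")
        if expected not in safetensors:
            return False
    return True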
import heapq as hq
import math
from collections.abc import Iterator
class _snake_case :
def __init__( self , a) -> Optional[Any]:
SCREAMING_SNAKE_CASE = str(id_)
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = {} # {vertex:distance}
def __lt__( self , a) -> Dict:
return self.key < other.key
def __repr__( self) -> Optional[Any]:
return self.id
def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[Any]:
self.neighbors.append(a)
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Tuple:
SCREAMING_SNAKE_CASE = weight
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1])
graph[b - 1].add_neighbor(graph[a - 1])
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , _UpperCAmelCase)
graph[b - 1].add_edge(graph[a - 1] , _UpperCAmelCase)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = []
for u in graph:
SCREAMING_SNAKE_CASE = math.inf
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = graph[:]
while q:
SCREAMING_SNAKE_CASE = min(_UpperCAmelCase)
q.remove(_UpperCAmelCase)
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
SCREAMING_SNAKE_CASE = u
SCREAMING_SNAKE_CASE = u.edges[v.id]
for i in range(1 , len(_UpperCAmelCase)):
a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
return a
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
for u in graph:
SCREAMING_SNAKE_CASE = math.inf
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = list(_UpperCAmelCase)
hq.heapify(_UpperCAmelCase)
while h:
SCREAMING_SNAKE_CASE = hq.heappop(_UpperCAmelCase)
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
SCREAMING_SNAKE_CASE = u
SCREAMING_SNAKE_CASE = u.edges[v.id]
hq.heapify(_UpperCAmelCase)
for i in range(1 , len(_UpperCAmelCase)):
yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def lowerCamelCase__ ():
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 | 0 |
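The heap variant above re-heapifies after every key decrease; a common alternative is lazy deletion, sketched here on a plain adjacency dict (names are ours, not the mangled ones above):

import heapq

def prim_mst_weight(adj, start):
    """adj maps node -> list of (weight, neighbor) for an undirected graph."""
    visited = {start}
    heap = list(adj[start])
    heapq.heapify(heap)
    total = 0
    while heap and len(visited) < len(adj):
        weight, node = heapq.heappop(heap)
        if node in visited:            # lazy deletion: skip stale heap entries
            continue
        visited.add(node)
        total += weight
        for edge in adj[node]:
            if edge[1] not in visited:
                heapq.heappush(heap, edge)
    return total

adj = {1: [(1, 2), (4, 3)], 2: [(1, 1), (2, 3)], 3: [(2, 2), (4, 1)]}
assert prim_mst_weight(adj, 1) == 3  # MST uses edges (1,2) and (2,3)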
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , '''num_attention_heads''' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , '''num_encoder_blocks''' ) )
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=64 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=[2, 2, 2, 2] , _UpperCAmelCase=[8, 4, 2, 1] , _UpperCAmelCase=[16, 32, 64, 128] , _UpperCAmelCase=[1, 4, 8, 16] , _UpperCAmelCase=[1, 2, 4, 8] , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=3 , _UpperCAmelCase=None , ):
__a : Any = parent
__a : Optional[int] = batch_size
__a : Optional[int] = image_size
__a : List[str] = num_channels
__a : List[str] = num_encoder_blocks
__a : int = sr_ratios
__a : str = depths
__a : Any = hidden_sizes
__a : Optional[int] = downsampling_rates
__a : List[Any] = num_attention_heads
__a : Optional[Any] = is_training
__a : int = use_labels
__a : List[Any] = hidden_act
__a : Any = hidden_dropout_prob
__a : Optional[Any] = attention_probs_dropout_prob
__a : Optional[int] = initializer_range
__a : Any = num_labels
__a : str = scope
def _lowerCamelCase ( self ):
__a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : int = None
if self.use_labels:
__a : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a : List[str] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[Any] = SegformerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : Any = model(_UpperCAmelCase )
__a : Tuple = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Union[str, Any] = self.num_labels
__a : int = SegformerForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : int = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
__a : Optional[int] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : List[str] = 1
__a : Optional[Any] = SegformerForSemanticSegmentation(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : int = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_UpperCAmelCase )
__a : str = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertGreater(result.loss , 0.0 )
def _lowerCamelCase ( self ):
__a : str = self.prepare_config_and_inputs()
__a , __a , __a : str = config_and_inputs
__a : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
__lowerCAmelCase = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def _lowerCamelCase ( self ):
__a : List[Any] = SegformerModelTester(self )
__a : Dict = SegformerConfigTester(self , config_class=_UpperCAmelCase )
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_UpperCAmelCase )
@unittest.skip('''SegFormer does not use inputs_embeds''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(_UpperCAmelCase )
__a : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Tuple = [*signature.parameters.keys()]
__a : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Tuple = True
for model_class in self.all_model_classes:
__a : List[str] = True
__a : Any = False
__a : str = True
__a : List[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__a : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__a : int = outputs.attentions
__a : int = sum(self.model_tester.depths )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a : Tuple = True
__a : int = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__a : Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__a : Dict = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# verify the first attentions (first block, first layer)
__a : int = (self.model_tester.image_size // 4) ** 2
__a : int = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
__a : int = (self.model_tester.image_size // 32) ** 2
__a : Dict = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
__a : int = len(_UpperCAmelCase )
# Check attention is always last and order is fine
__a : Union[str, Any] = True
__a : Tuple = True
__a : List[str] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__a : Tuple = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(out_len + 1 , len(_UpperCAmelCase ) )
__a : Tuple = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# verify the first attentions (first block, first layer)
__a : Optional[Any] = (self.model_tester.image_size // 4) ** 2
__a : List[Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _lowerCamelCase ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__a : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__a : Optional[Any] = outputs.hidden_states
__a : Optional[Any] = self.model_tester.num_encoder_blocks
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Optional[int] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : int = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
if not self.model_tester.is_training:
return
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Optional[int] = True
for model_class in self.all_model_classes:
if model_class in get_values(_UpperCAmelCase ):
continue
__a : Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
__a : Optional[Any] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
__a : List[Any] = model(**_UpperCAmelCase ).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowerCamelCase ( self ):
pass
@slow
def _lowerCamelCase ( self ):
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Dict = SegformerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self ):
# only resize + normalize
__a : List[str] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_UpperCAmelCase , align=_UpperCAmelCase , do_random_crop=_UpperCAmelCase )
__a : List[Any] = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
_UpperCAmelCase )
__a : Union[str, Any] = prepare_img()
__a : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' )
__a : Optional[Any] = encoded_inputs.pixel_values.to(_UpperCAmelCase )
with torch.no_grad():
__a : Dict = model(_UpperCAmelCase )
__a : int = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__a : List[str] = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def _lowerCamelCase ( self ):
# only resize + normalize
__a : Optional[Any] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_UpperCAmelCase , align=_UpperCAmelCase , do_random_crop=_UpperCAmelCase )
__a : str = SegformerForSemanticSegmentation.from_pretrained(
'''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(_UpperCAmelCase )
__a : List[str] = prepare_img()
__a : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' )
__a : Union[str, Any] = encoded_inputs.pixel_values.to(_UpperCAmelCase )
with torch.no_grad():
__a : str = model(_UpperCAmelCase )
__a : Optional[int] = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__a : Optional[Any] = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1e-1 ) )
@slow
def _lowerCamelCase ( self ):
# only resize + normalize
__a : Dict = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_UpperCAmelCase , align=_UpperCAmelCase , do_random_crop=_UpperCAmelCase )
__a : Tuple = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
_UpperCAmelCase )
__a : List[str] = prepare_img()
__a : List[str] = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' )
__a : Union[str, Any] = encoded_inputs.pixel_values.to(_UpperCAmelCase )
with torch.no_grad():
__a : Tuple = model(_UpperCAmelCase )
__a : int = outputs.logits.detach().cpu()
__a : List[Any] = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(500, 300)] )
__a : str = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
__a : int = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
__a : Optional[int] = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase ) | 52 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
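# A minimal, self-contained sketch of the lazy-import idea used above (PEP 562
# module-level __getattr__). The helper below is illustrative only, not the
# transformers implementation; "make_lazy" and "lazy_math" are invented names.
import importlib
import types


def make_lazy(name, attr_to_module):
    module = types.ModuleType(name)

    def __getattr__(attr):  # consulted only when `attr` is missing from the module
        if attr in attr_to_module:
            real = importlib.import_module(attr_to_module[attr])
            return getattr(real, attr)
        raise AttributeError(attr)

    module.__getattr__ = __getattr__
    return module


lazy_math = make_lazy("lazy_math", {"sqrt": "math"})
print(lazy_math.sqrt(9.0))  # the real "math" module is imported only at this point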
| 73 | 0 |
import math


class SelfOrganizingMap:
    """A two-cluster self-organizing map (Kohonen network)."""

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning weight vector by squared Euclidean distance."""
        distance_0 = 0.0
        distance_1 = 0.0
        for i in range(len(sample)):
            distance_0 += math.pow(sample[i] - weights[0][i], 2)
            distance_1 += math.pow(sample[i] - weights[1][i], 2)
        # the winner is the weight vector with the *smaller* distance
        return 0 if distance_0 < distance_1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Move winning weight vector j towards the sample by learning rate alpha."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Cluster that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
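    # Quick sanity check of get_winner (hypothetical inputs, not from the
    # original script): with weights [[0, 0], [1, 1]] the sample [1, 1] has
    # squared distances 2.0 to row 0 and 0.0 to row 1, so cluster 1 must win.
    som = SelfOrganizingMap()
    assert som.get_winner([[0.0, 0.0], [1.0, 1.0]], [1, 1]) == 1
    assert som.get_winner([[0.0, 0.0], [1.0, 1.0]], [0, 0]) == 0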
| 53 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
a_ : Union[str, Any] = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
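# Usage sketch (a minimal check, assuming this config behaves like other
# transformers PretrainedConfig subclasses): the attribute_map above lets
# GPT-2-style aliases resolve to the canonical fields.
config = DecisionTransformerConfig(n_head=2, n_layer=4)
assert config.num_attention_heads == 2  # resolved through attribute_map to n_head
assert config.num_hidden_layers == 4    # resolved through attribute_map to n_layer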
| 73 | 0 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A :
@staticmethod
def lowerCAmelCase__ ( *_lowerCAmelCase: List[Any] , **_lowerCAmelCase: List[str] ) -> List[str]:
'''simple docstring'''
pass
@is_pipeline_test
@require_torch
@require_vision
class A ( unittest.TestCase ):
_snake_case =MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: List[str] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
UpperCAmelCase_ =[
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: str ) -> int:
'''simple docstring'''
UpperCAmelCase_ =vqa_pipeline(_lowerCAmelCase , top_k=1 )
self.assertEqual(
_lowerCAmelCase , [
[{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}],
[{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}],
] , )
@require_torch
def lowerCAmelCase__ ( self: Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ =pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
UpperCAmelCase_ ="./tests/fixtures/tests_samples/COCO/000000039769.png"
UpperCAmelCase_ ="How many cats are there?"
UpperCAmelCase_ =vqa_pipeline(image=_lowerCAmelCase , question="How many cats are there?" , top_k=2 )
self.assertEqual(
_lowerCAmelCase , [{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}, {"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}] )
UpperCAmelCase_ =vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
_lowerCAmelCase , [{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}, {"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}] )
@slow
@require_torch
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
UpperCAmelCase_ ="./tests/fixtures/tests_samples/COCO/000000039769.png"
UpperCAmelCase_ ="How many cats are there?"
UpperCAmelCase_ =vqa_pipeline(image=_lowerCAmelCase , question=_lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] )
UpperCAmelCase_ =vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] )
UpperCAmelCase_ =vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [[{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def lowerCAmelCase__ ( self: int ) -> List[str]:
'''simple docstring'''
pass
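# Minimal usage sketch for the pipeline exercised above (the checkpoint and
# image path are the ones already used in these tests; scores from the tiny
# random model are not meaningful, only the output shape is):
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
preds = vqa(
    image="./tests/fixtures/tests_samples/COCO/000000039769.png",
    question="How many cats are there?",
    top_k=2,
)
print(preds)  # a list of {"score": ..., "answer": ...} dicts, highest score first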
| 54 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
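# How this script is typically launched (comment-only sketch; the CLI commands
# are standard `accelerate` usage, but the script filename is a placeholder):
#
#   accelerate config                                       # one-time machine setup
#   accelerate launch nlp_example.py --mixed_precision fp16
#
# A plain `python nlp_example.py --cpu` also works for single-process runs.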
| 73 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mobilebert"] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_mobilebert"] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 55 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_rag"] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_rag"] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 73 | 0 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Implementation of the simulated annealing search algorithm."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_max.score()}"
    )
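    # Worked look at the acceptance rule above (illustrative values): a downhill
    # move with change = -2.0 is accepted with probability e^(change / temp),
    # so cooling from T=100 to T=1 shrinks acceptance from ~0.980 to ~0.135 and
    # the search becomes increasingly greedy.
    for temp in (100.0, 10.0, 1.0):
        print(f"T={temp}: acceptance probability {(math.e) ** (-2.0 / temp):.3f}")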
| 56 |
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force of given magnitude and direction into (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """Check if a system of forces is in static equilibrium (net moment about the origin ~ 0)."""
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
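    # Worked check with a hypothetical see-saw (not one of the original
    # problems): a 15 N pivot reaction at the origin, 10 N down at x = 1 m and
    # 5 N down at x = -2 m give moments 0, -10 and +10 N*m about the origin,
    # so the net moment vanishes and the system is in equilibrium.
    seesaw_forces = array([[0, 15], [0, -10], [0, -5]])
    seesaw_location = array([[0, 0], [1, 0], [-2, 0]])
    assert in_static_equilibrium(seesaw_forces, seesaw_location)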
| 73 | 0 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
    def __init__(
        self,
        parent,
        do_resize=True,
        size=None,
        size_divisor=32,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        do_center_crop=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_pad=True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
def _a ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def _a ( self , _lowerCamelCase , _lowerCamelCase=False ):
if not batched:
UpperCamelCase_: Tuple = self.size['shortest_edge']
UpperCamelCase_: Dict = image_inputs[0]
if isinstance(_lowerCamelCase , Image.Image ):
UpperCamelCase_ ,UpperCamelCase_: List[Any] = image.size
else:
UpperCamelCase_ ,UpperCamelCase_: int = image.shape[1], image.shape[2]
UpperCamelCase_: List[str] = size / min(_lowerCamelCase , _lowerCamelCase )
if h < w:
UpperCamelCase_ ,UpperCamelCase_: str = size, scale * w
else:
UpperCamelCase_ ,UpperCamelCase_: str = scale * h, size
UpperCamelCase_: Dict = int((1_3_3_3 / 8_0_0) * size )
if max(_lowerCamelCase , _lowerCamelCase ) > max_size:
UpperCamelCase_: int = max_size / max(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: Optional[int] = newh * scale
UpperCamelCase_: Optional[int] = neww * scale
UpperCamelCase_ ,UpperCamelCase_: Dict = int(newh + 0.5 ), int(neww + 0.5 )
UpperCamelCase_ ,UpperCamelCase_: int = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCamelCase_: int = []
for image in image_inputs:
UpperCamelCase_ ,UpperCamelCase_: Union[str, Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase_: Optional[int] = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[0] )[0]
UpperCamelCase_: Optional[Any] = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _lowerCAmelCase( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : Dict =BridgeTowerImageProcessor if is_vision_available() else None
def _a ( self ):
UpperCamelCase_: int = BridgeTowerImageProcessingTester(self )
@property
def _a ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ):
UpperCamelCase_: Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'image_std' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'size' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'size_divisor' ) )
def _a ( self ):
pass
def _a ( self ):
# Initialize image processor
UpperCamelCase_: int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_: List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
UpperCamelCase_: List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
UpperCamelCase_ ,UpperCamelCase_: Dict = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase_: Optional[Any] = image_processing(_lowerCamelCase , return_tensors='pt' ).pixel_values
UpperCamelCase_ ,UpperCamelCase_: str = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self ):
# Initialize image processor
UpperCamelCase_: Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase_: List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
UpperCamelCase_: int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
UpperCamelCase_ ,UpperCamelCase_: Union[str, Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase_: Optional[int] = image_processing(_lowerCamelCase , return_tensors='pt' ).pixel_values
UpperCamelCase_ ,UpperCamelCase_: Optional[int] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self ):
# Initialize image processor
UpperCamelCase_: Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase_: str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
UpperCamelCase_: List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
UpperCamelCase_ ,UpperCamelCase_: Union[str, Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase_: Optional[int] = image_processing(_lowerCamelCase , return_tensors='pt' ).pixel_values
UpperCamelCase_ ,UpperCamelCase_: List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , ) | 57 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Optional[int] = logging.get_logger(__name__)
a_ : int = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
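# Usage sketch (a minimal check, assuming the class is used as CvtConfig the
# way transformers exposes it): the three CvT stages are configured through
# parallel per-stage lists, which must stay the same length.
config = CvtConfig()
assert len(config.patch_sizes) == len(config.embed_dim) == len(config.depth) == 3
print(config.embed_dim)  # [64, 192, 384]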
| 73 | 0 |
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the objects defined in the _import_structure and the objects imported
    under TYPE_CHECKING.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the transformers repo and raise an error if at least one is inconsistent."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
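# Small illustration of find_backend on hand-written lines (hypothetical
# inputs): guarded backend checks reduce to a sorted "_and_"-joined key,
# anything else yields None.
assert find_backend("    if not is_torch_available():") == "torch"
assert find_backend("    x = 1") is None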
| 58 |
def get_bound(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return min_val when option is True, otherwise max_val (after validating the arguments)."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
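    # Worked note on convergence (hypothetical bounds): each guess halves the
    # remaining interval, so a target between -10000 and 10000 is found in at
    # most about ceil(log2(20000)) = 15 calls to get_avg.
    import math

    print("worst-case guesses for a 20000-wide range:", math.ceil(math.log2(20000)))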
| 73 | 0 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__A = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model1, model2):
    """Return True if the two Flax models have numerically identical parameters."""
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params)
    flat_params_2 = flatten_dict(model2.params)

    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
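
# Usage sketch for check_models_equal (an illustration, not part of the
# original test suite): round-trip a model through save_pretrained /
# from_pretrained and confirm the weights survive the serialization.
def roundtrip_is_lossless(model):
    with tempfile.TemporaryDirectory() as tmp_dir:
        model.save_pretrained(tmp_dir)
        reloaded = type(model).from_pretrained(tmp_dir)
    return check_models_equal(model, reloaded)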
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
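
# The max_shard_size="10KB" argument above forces even this tiny checkpoint to
# be split into several shard files plus an index. A sketch for inspecting the
# resulting layout (reuses the tiny-bert repo named in the tests; the expected
# file names are an assumption about the Flax sharding convention):
def show_sharded_layout():
    config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
    model = FlaxBertModel(config)
    with tempfile.TemporaryDirectory() as tmp_dir:
        model.save_pretrained(tmp_dir, max_shard_size="10KB")
        # Expect flax_model-XXXXX-of-XXXXX.msgpack shards plus a
        # flax_model.msgpack.index.json mapping each weight to its shard.
        print(sorted(os.listdir(tmp_dir)))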
| 59 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass, caching the keys/values of the prefix
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical next tokens and extend input_ids with them
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to input_ids and the attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select a random slice of the hidden dimension to compare
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for the slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
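
# The decoder-cache check above verifies the property that makes incremental
# generation work: feeding only the new tokens plus past_key_values must
# reproduce the hidden states of a full forward pass. A minimal greedy-decoding
# sketch built on the same idea (an illustration, not part of this test file;
# it assumes torch is available and `model` is any causal LM such as
# OpenLlamaForCausalLM, with the attention mask omitted for brevity):
@torch.no_grad()
def greedy_generate(model, input_ids, max_new_tokens=20):
    past_key_values = None
    generated = input_ids
    next_input = input_ids  # the full prompt on the first step only
    for _ in range(max_new_tokens):
        out = model(next_input, past_key_values=past_key_values, use_cache=True)
        past_key_values = out.past_key_values  # reuse the cached keys/values
        next_token = out.logits[:, -1, :].argmax(dim=-1, keepdim=True)
        generated = torch.cat([generated, next_token], dim=-1)
        next_input = next_token  # subsequent steps feed only the new token
    return generated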
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
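
# Outside the test suite, the same rope_scaling mechanism extends a trained
# checkpoint's context window at load time. A sketch of the pattern; the
# checkpoint name used here is a placeholder, not a real repo:
def load_with_linear_rope_scaling(checkpoint="my-org/my-llama-checkpoint", factor=4.0):
    from transformers import AutoConfig, AutoModelForCausalLM

    config = AutoConfig.from_pretrained(checkpoint)
    # Stretch position indices by `factor`: a model trained on 2048 tokens can
    # then attend over roughly 2048 * factor, with some quality loss unless it
    # is fine-tuned at the longer length.
    config.rope_scaling = {"type": "linear", "factor": factor}
    return AutoModelForCausalLM.from_pretrained(checkpoint, config=config)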
| 73 | 0 |