| code (string, lengths 86–54.5k) | code_codestyle (int64, 0–371) | style_context (string, lengths 87–49.2k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap the i-th and last elements
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd: swap the first and last elements
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr)) | 262 |
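A quick sanity check for `heaps` as fixed above; for three elements Heap's algorithm yields all 3! = 6 permutations:

```python
assert sorted(heaps([1, 2, 3])) == [
    (1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)
]
```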
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 264 | 0 |
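As a quick illustration of the `_rope_scaling_validation` logic above, a minimal sketch (assuming a recent `transformers` that exports `GPTNeoXConfig`):

```python
from transformers import GPTNeoXConfig

# Valid: a two-field dict with a known type and a float factor > 1.
config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})

# Invalid: the factor must be a float strictly greater than 1.0.
try:
    GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 1.0})
except ValueError as err:
    print(err)
```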
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 370 |
'''simple docstring'''
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Check all 8 elements surrounding the current element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Mark this cell as visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
| 331 | 0 |
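A minimal usage sketch for the island counter above (method names as fixed here); regions of 1s connected in any of the 8 directions count as a single island:

```python
graph = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
g = Graph(5, 5, graph)
print(g.count_islands())  # 5
```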
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        # the "anime turle" prompt typo is kept as-is: the recorded reference
        # output was generated with this exact prompt
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(input_image, "anime turle", num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 54 |
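For context, a hedged sketch of how the pipeline under test is used outside the test-suite; the checkpoint name is the public Stable unCLIP release and should be treated as an assumption here:

```python
import torch
from diffusers import StableUnCLIPImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-unclip", torch_dtype=torch.float16
).to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
)
image = pipe(init_image, prompt="anime turtle").images[0]
```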
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch
logger = logging.get_logger(__name__)


class Conversation:
    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids | 232 | 0 |
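For orientation, a short usage sketch of the classes above through the high-level `pipeline` factory (the DialoGPT checkpoint is the usual documentation example, not something this file prescribes):

```python
from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
conversation = Conversation("Hi, can you recommend a good movie?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])

conversation.add_user_input("Is it family friendly?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])
```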
def mean_absolute_deviation(nums: list[int]) -> float:
    """Calculate the mean absolute deviation of a list of numbers.

    >>> mean_absolute_deviation([1, 2, 3, 4])
    1.0
    """
    if not nums:  # Make sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 351 |
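A worked example of the function above: for [2, 70, 6, 50, 20, 8, 4, 0] the average is 20, the absolute deviations sum to 18 + 50 + 14 + 30 + 0 + 12 + 16 + 20 = 160, and 160 / 8 = 20:

```python
assert mean_absolute_deviation([2, 70, 6, 50, 20, 8, 4, 0]) == 20.0
```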
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 282 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 55 | """simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| 290 | 0 |
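A small, hypothetical illustration of how aliases like these are meant to appear in signatures:

```python
def read_paths(paths: NestedDataStructureLike[PathLike]) -> None:
    # Accepts a single path, a list of paths, or a dict of named paths.
    ...
```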
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 356 |
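A short sketch of the size-dict behavior exercised by `test_image_processor_from_dict_with_kwargs` above: legacy tuple sizes are read as (width, height) and normalized to a dict:

```python
from transformers import DonutImageProcessor

processor = DonutImageProcessor.from_dict({"size": (42, 84)})  # legacy (width, height)
print(processor.size)  # {'height': 84, 'width': 42}
```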
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    # `from_gh` is a module-level flag assigned in the `__main__` block below,
    # before these functions are called.
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()

    from_gh = args.from_gh

    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)

    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 139 | 0 |
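For reference, a typical invocation of the script above (the run id and token values here are placeholders): `python extract_warnings.py --workflow_run_id 12345 --output_dir ./warnings --token <GITHUB_TOKEN> --targets DeprecationWarning,UserWarning`.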
"""simple docstring"""
def solution() -> int:
    """Return the product d1 * d10 * d100 * ... * d1000000 of the digits of
    Champernowne's constant 0.123456789101112..."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
    print(solution())
| 256 |
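A quick, self-contained check of the indexing convention used above (the 0-based string index n-1 corresponds to digit dn of the constant):

```python
digits = "".join(str(i) for i in range(1, 16))
assert digits.startswith("123456789101112")
assert int(digits[0]) == 1  # d1
assert int(digits[9]) == 1  # d10, the first digit of "10"
```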
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )
    from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel

if is_torch_available():
    import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 316 | 0 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
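# A minimal sketch of the idea behind local SGD (a hypothetical helper, NOT the
# `accelerate` API): every worker takes K independent optimizer steps, and every
# K-th step the parameters are averaged across workers, e.g.
#
#   def local_sgd_sync(model, step, k, world_size):
#       if step % k == 0:
#           for param in model.parameters():
#               torch.distributed.all_reduce(param.data, op=torch.distributed.ReduceOp.SUM)
#               param.data /= world_size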
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator , batch_size = 16 ):
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config , args ):
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None ) == "1":
        config['''num_epochs'''] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps )
    local_sgd_steps = int(args.local_sgd_steps )
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    metric = evaluate.load('''glue''' , '''mrpc''' )

    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) , )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )

    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        with LocalSGD(
            accelerator=accelerator , model=model , local_sgd_steps=local_sgd_steps , enabled=local_sgd_steps is not None ) as local_sgd:
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model ):
                    output = model(**batch )
                    loss = output.loss
                    accelerator.backward(loss )
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
            metric.add_batch(
                predictions=predictions , references=references , )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:" , eval_metric )


def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    # New Code #
    parser.add_argument(
        '''--gradient_accumulation_steps''' , type=int , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , )
    parser.add_argument(
        '''--local_sgd_steps''' , type=int , default=8 , help='''Number of local SGD steps or None to disable local SGD''' )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    args = parser.parse_args()
    config = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
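# Assuming this file is saved as local_sgd.py and `accelerate config` has been
# run, a typical launch would look like:
#
#   accelerate launch local_sgd.py --gradient_accumulation_steps 2 --local_sgd_steps 8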
| 155 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _UpperCAmelCase ( unittest.TestCase):
_lowerCAmelCase : Optional[int] = MODEL_FOR_CAUSAL_LM_MAPPING
_lowerCAmelCase : Union[str, Any] = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
    def test_small_model_pt(self ):
        text_generator = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator('''This is a test''' , do_sample=False )
        self.assertEqual(
            outputs , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
        outputs = text_generator(['''This is a test''', '''This is a second test'''] )
        self.assertEqual(
            outputs , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
        outputs = text_generator('''This is a test''' , do_sample=True , num_return_sequences=2 , return_tensors=True )
        self.assertEqual(
            outputs , [
                {'''generated_token_ids''': ANY(list )},
                {'''generated_token_ids''': ANY(list )},
            ] , )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = '''<pad>'''
        outputs = text_generator(
            ['''This is a test''', '''This is a second test'''] , do_sample=True , num_return_sequences=2 , batch_size=2 , return_tensors=True , )
        self.assertEqual(
            outputs , [
                [
                    {'''generated_token_ids''': ANY(list )},
                    {'''generated_token_ids''': ANY(list )},
                ],
                [
                    {'''generated_token_ids''': ANY(list )},
                    {'''generated_token_ids''': ANY(list )},
                ],
            ] , )
    @require_tf
    def test_small_model_tf(self ):
        text_generator = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator('''This is a test''' , do_sample=False )
        self.assertEqual(
            outputs , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
        outputs = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=False )
        self.assertEqual(
            outputs , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
    def get_test_pipeline(self , model , tokenizer , processor ):
        text_generator = TextGenerationPipeline(model=model , tokenizer=tokenizer )
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self ):
        prompt = '''Hello I believe in'''
        text_generator = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
        outputs = text_generator(prompt )
        self.assertEqual(
            outputs , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
        outputs = text_generator(prompt , stop_sequence=''' fe''' )
        self.assertEqual(outputs , [{'''generated_text''': '''Hello I believe in fe'''}] )
    def run_pipeline_test(self , text_generator , _ ):
        model = text_generator.model
        tokenizer = text_generator.tokenizer
        outputs = text_generator('''This is a test''' )
        self.assertEqual(outputs , [{'''generated_text''': ANY(str )}] )
        self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
        outputs = text_generator('''This is a test''' , return_full_text=False )
        self.assertEqual(outputs , [{'''generated_text''': ANY(str )}] )
        self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
        text_generator = pipeline(task='''text-generation''' , model=model , tokenizer=tokenizer , return_full_text=False )
        outputs = text_generator('''This is a test''' )
        self.assertEqual(outputs , [{'''generated_text''': ANY(str )}] )
        self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
        outputs = text_generator('''This is a test''' , return_full_text=True )
        self.assertEqual(outputs , [{'''generated_text''': ANY(str )}] )
        self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
        outputs = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
                [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
            ] , )
        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=True )
            self.assertEqual(
                outputs , [
                    [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
                    [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
                ] , )
        with self.assertRaises(ValueError ):
            outputs = text_generator('''test''' , return_full_text=True , return_text=True )
        with self.assertRaises(ValueError ):
            outputs = text_generator('''test''' , return_full_text=True , return_tensors=True )
        with self.assertRaises(ValueError ):
            outputs = text_generator('''test''' , return_text=True , return_tensors=True )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
            outputs = text_generator('''''' )
            self.assertEqual(outputs , [{'''generated_text''': ANY(str )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
                outputs = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
snake_case_ : Tuple = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(lowercase_ ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self ):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloat16} , )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
        outputs = pipe('''This is a test''' )
        self.assertEqual(
            outputs , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloat16 )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
        outputs = pipe('''This is a test''' )
        self.assertEqual(
            outputs , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
        outputs = pipe('''This is a test''' )
        self.assertEqual(
            outputs , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
    def test_pipeline_fp16(self ):
        import torch

        pipe = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.float16 )
        pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
    def test_pipeline_accelerate_top_p(self ):
        import torch

        pipe = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.float16 )
        pipe('''This is a test''' , do_sample=True , top_p=0.5 )
    def test_pipeline_length_setting_warning(self ):
        prompt = '''Hello world'''
        text_generator = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
        if text_generator.model.framework == "tf":
            logger = logging.get_logger('''transformers.generation.tf_utils''' )
        else:
            logger = logging.get_logger('''transformers.generation.utils''' )
        logger_msg = '''Both `max_new_tokens`'''  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_length=10 , max_new_tokens=1 )
        self.assertIn(logger_msg , cl.out )

        # The user only sets one -> no warning
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_new_tokens=1 )
        self.assertNotIn(logger_msg , cl.out )

        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_length=10 )
        self.assertNotIn(logger_msg , cl.out )
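    # Note: `max_length` bounds prompt + generated tokens while `max_new_tokens`
    # bounds only the generated ones, which is why setting both (first
    # CaptureLogger block above) is ambiguous and triggers the warning.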
| 155 | 1 |
def twos_complement(number: int ) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer" )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod() | 189 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowerCamelCase : int =TypeVar('''T''')
class LRUCache(Generic[T] ):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 1_0  # Maximum capacity of cache

    def __init__( self , n: int ):
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0." )
        else:
            LRUCache._MAX_CAPACITY = n

    def refer( self , x: T ):
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )

    def display( self ):
        for k in self.dq_store:
            print(k )
def __repr__( self : Union[str, Any] ):
'''simple docstring'''
return F'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
import doctest
doctest.testmod()
lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]" | 189 | 1 |
from __future__ import annotations
import math
def minimax(depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0' )
    if not scores:
        raise ValueError('Scores cannot be empty' )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    height = math.log(len(scores ) , 2 )
    print(f'Optimal value : {minimax(0 , 0 , True , scores , height )}' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
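# Hand trace for the demo scores above (complete binary tree over the 8 leaves):
#   depth 2 (maximizer): max(90, 23)=90, max(6, 33)=33, max(21, 65)=65, max(123, 34423)=34423
#   depth 1 (minimizer): min(90, 33)=33, min(65, 34423)=65
#   root   (maximizer): max(33, 65)=65 -> prints "Optimal value : 65"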
| 288 |
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__( self ):
        self.graph = {}
    def add_pair( self , u , v , w=1 ):
        if self.graph.get(u ):
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v ):
            self.graph[v] = []
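    # The adjacency list maps u -> [[w, v], ...]. A toy use (hypothetical values,
    # assuming the class names above):
    #   g = DirectedGraph()
    #   g.add_pair(0, 1)       # weight defaults to 1
    #   g.add_pair(1, 2, w=5)
    #   g.graph == {0: [[1, 1]], 1: [[5, 2]], 2: []}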
    def all_nodes( self ):
        return list(self.graph )
    def remove_pair( self , u , v ):
        if self.graph.get(u ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_ )
    def dfs( self , s=-2 , d=-1 ):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(node[1] )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack ) == 0:
                return visited
    def fill_graph( self , c=-1 ):
        if c == -1:
            c = floor(random() * 10_000 ) + 10
        for i in range(c ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102 ) + 1 ):
                n = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(i , n , 1 )
    def bfs( self , s=-2 ):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        d.append(s )
        visited.append(s )
        while d:
            s = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited
    def in_degree( self , u ):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count
    def out_degree( self , u ):
        return len(self.graph[u] )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Tuple = []
lowercase__: str = []
if s == -2:
lowercase__: Dict = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: List[Any] = s
lowercase__: Any = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Dict = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase__ ) != 0:
lowercase__: int = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Optional[int] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return sorted_nodes
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
lowercase__: List[Any] = []
lowercase__: int = []
lowercase__: List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: Dict = -2
lowercase__: Union[str, Any] = []
lowercase__: List[str] = s
lowercase__: Dict = False
lowercase__: Union[str, Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: List[Any] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: Any = True
if len(lowerCAmelCase__ ) != 0:
lowercase__: Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Union[str, Any] = False
indirect_parents.append(lowerCAmelCase__ )
lowercase__: int = s
lowercase__: str = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Any = []
lowercase__: int = []
lowercase__: Dict = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: Optional[int] = -2
lowercase__: List[Any] = []
lowercase__: List[str] = s
lowercase__: List[Any] = False
lowercase__: str = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: Any = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
lowercase__: Any = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Optional[Any] = False
indirect_parents.append(lowerCAmelCase__ )
lowercase__: Dict = s
lowercase__: Dict = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
    def dfs_time( self , s=-2 , d=-1 ):
        begin = time()
        self.dfs(s , d )
        end = time()
        return end - begin

    def bfs_time( self , s=-2 ):
        begin = time()
        self.bfs(s )
        end = time()
        return end - begin
class Graph:
    def __init__( self ):
        self.graph = {}
    def add_pair( self , u , v , w=1 ):
        # check if the u exists
        if self.graph.get(u ):
            # if there already is an edge
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v ):
            # if there already is an edge
            if self.graph[v].count([w, u] ) == 0:
                self.graph[v].append([w, u] )
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]
    def remove_pair( self , u , v ):
        if self.graph.get(u ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_ )
        # the other way round
        if self.graph.get(v ):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_ )
    def dfs( self , s=-2 , d=-1 ):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(node[1] )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack ) == 0:
                return visited
    def fill_graph( self , c=-1 ):
        if c == -1:
            c = floor(random() * 10_000 ) + 10
        for i in range(c ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102 ) + 1 ):
                n = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(i , n , 1 )
    def bfs( self , s=-2 ):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        d.append(s )
        visited.append(s )
        while d:
            s = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited
    def degree( self , u ):
        return len(self.graph[u] )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
lowercase__: str = []
lowercase__: Dict = []
lowercase__: Optional[int] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: Dict = -2
lowercase__: Dict = []
lowercase__: List[Any] = s
lowercase__: Union[str, Any] = False
lowercase__: List[str] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: Any = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: str = True
if len(lowerCAmelCase__ ) != 0:
lowercase__: Dict = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: int = False
indirect_parents.append(lowerCAmelCase__ )
lowercase__: Tuple = s
lowercase__: List[Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
lowercase__: Tuple = []
lowercase__: Optional[int] = []
lowercase__: Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: Tuple = -2
lowercase__: Any = []
lowercase__: int = s
lowercase__: Optional[int] = False
lowercase__: List[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: Union[str, Any] = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: List[str] = True
if len(lowerCAmelCase__ ) != 0:
lowercase__: List[str] = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Dict = False
indirect_parents.append(lowerCAmelCase__ )
lowercase__: Optional[Any] = s
lowercase__: Optional[int] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
    def all_nodes( self ):
        return list(self.graph )
    def dfs_time( self , s=-2 , d=-1 ):
        begin = time()
        self.dfs(s , d )
        end = time()
        return end - begin

    def bfs_time( self , s=-2 ):
        begin = time()
        self.bfs(s )
        end = time()
        return end - begin
| 288 | 1 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class a__ ( snake_case ):
"""simple docstring"""
    def __init__( self , tokenizer , dataset , seq_length=1024 , num_of_sequences=1024 , chars_per_token=3.6 ):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__( self ):
        iterator = iter(self.dataset )
        more_examples = True
        while more_examples:
            buffer , buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator )["content"] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer , truncation=False )["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            for i in range(0 , len(all_token_ids ) , self.seq_length ):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids ) == self.seq_length:
                    yield torch.tensor(input_ids )
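    # Packing sketch: with seq_length=4, concat_token_id=0 and tokenized texts
    # [5, 6, 7] and [8, 9], all_token_ids is [5, 6, 7, 0, 8, 9, 0]; only the full
    # window [5, 6, 7, 0] is yielded and the 3-token remainder is dropped.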
def create_dataloader(args ):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name , split="train" , **ds_kwargs )
    valid_dataset = ConstantLengthDataset(tokenizer , valid_data , seq_length=args.seq_length )
    eval_dataloader = DataLoader(valid_dataset , batch_size=args.batch_size )
    return eval_dataloader
def evaluate(args ):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch , labels=batch )
        loss = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(loss ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses ) )
    try:
        perplexity = torch.exp(loss )
    except OverflowError:
        perplexity = float("inf" )
    return loss.item(), perplexity.item()
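# Perplexity is exp(mean token-level cross-entropy); the try/except in evaluate()
# is meant to fall back to float("inf") if the exponentiation overflows.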
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("""Evaluating and saving model after training""")
eval_loss, perplexity = evaluate(args)
logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 68 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline ):
    def __init__(self , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
        super().__init__()
        self.register_modules(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
def SCREAMING_SNAKE_CASE (self , a_ = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__snake_case : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.enable_attention_slicing(a_ )
@torch.no_grad()
    def __call__(self , prompt , height = 5_12 , width = 5_12 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , text_embeddings = None , **kwargs , ):
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps )}.""" )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 , num_images_per_prompt , 1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt , seq_len , -1 )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = ['''''']
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !="""
                    f""" {type(prompt )}.""" )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    ''' the batch size of `prompt`.''' )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens , padding='''max_length''' , max_length=max_length , truncation=True , return_tensors='''pt''' , )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size , num_images_per_prompt , 1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt , seq_len , -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference , generator=generator , device='''cpu''' , dtype=latents_dtype ).to(self.device )
                latents = torch.randn(latents_shape , generator=generator , device='''cpu''' , dtype=latents_dtype ).to(
                    self.device )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference , generator=generator , device=self.device , dtype=latents_dtype )
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            latents_reference = latents_reference.to(self.device )
            latents = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx , 0 )
        dy = max(-dy , 0 )
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['''eta'''] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
# perform guidance
if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
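                # classifier-free guidance in residual form:
                # eps = eps_uncond + w * (eps_text - eps_uncond), with w = guidance_scale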
# compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
# call the callback, if provided
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , latents )

        latents = 1 / 0.1_8215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image ) , return_tensors='''pt''' ).to(
                self.device )
            image , has_nsfw_concept = self.safety_checker(
                images=image , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image )
if not return_dict:
return (image, has_nsfw_concept)
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=has_nsfw_concept )
| 102 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp ):
"""simple docstring"""
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
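# Quick check against the CJK ranges above:
#   _is_chinese_char(ord("中")) -> True
#   _is_chinese_char(ord("a"))  -> False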
def is_chinese(word ):
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
def get_chinese_word(tokens ):
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol(bert_tokens , chinese_word_set ):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )

    bert_word = bert_tokens
    start , end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            l = min(end - start , max_word_len )
            for i in range(l , 1 , -1 ):
                whole_word = "".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
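# Toy example (hypothetical tokens): with bert_tokens ["你", "好", "世", "界"] and
# chinese_word_set {"世界"}, the loop rewrites the tokens to
# ["你", "好", "世", "##界"], marking "界" as the continuation of a whole word.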
def prepare_ref(lines , ltp_tokenizer , bert_tokenizer ):
    ltp_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )

    bert_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
        bert_res.extend(res["""input_ids"""] )
    assert len(bert_res ) == len(lines )

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )

    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def main(args ):
    with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
        data = [json.dumps(ref ) + """\n""" for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
_lowerCamelCase : str = parser.parse_args()
main(args)
| 370 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_accelerated_optimizer_pickling(self ):
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
        AcceleratorState._reset_state()
| 337 | 0 |
"""simple docstring"""
import operator as op
_a : List[str]= "scaler.pt"
_a : Tuple= "pytorch_model"
_a : List[Any]= "random_states"
_a : Tuple= "optimizer"
_a : Any= "scheduler"
_a : List[str]= "pytorch_model.bin"
_a : Any= "pytorch_model.bin.index.json"
_a : Dict= "model.safetensors"
_a : List[Any]= "model.safetensors.index.json"
_a : Optional[Any]= "1.10.2"
_a : Optional[int]= "py38"
_a : str= "4.17.0"
_a : List[str]= ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
_a : int= ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
_a : Optional[Any]= ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
_a : Optional[Any]= ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
_a : List[Any]= ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
_a : Union[str, Any]= "2.0.1"
_a : Any= ["pdsh", "standard", "openmpi", "mvapich"]
_a : str= ["default", "reduce-overhead", "max-autotune"]
_a : Tuple= {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
_a : List[Any]= [
"nnodes",
"nproc_per_node",
"rdzv_backend",
"rdzv_endpoint",
"rdzv_id",
"rdzv_conf",
"standalone",
"max_restarts",
"monitor_interval",
"start_method",
"role",
"module",
"m",
"no_python",
"run_path",
"log_dir",
"r",
"redirects",
"t",
"tee",
"node_rank",
"master_addr",
"master_port",
]
_a : Tuple= ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
_a : List[str]= ["DEEPSPEED", "MULTI_XPU", "FSDP"]
| 172 | """simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
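# Usage sketch for the processor above. The checkpoint name is illustrative, not
# taken from this file; the processor simply merges the tokenizer output with the
# image processor's pixel_values:
# processor = AltCLIPProcessor.from_pretrained('BAAI/AltCLIP')
# batch = processor(text=['a photo of a cat'], images=[pil_image], return_tensors='pt')
# list(batch.keys())  # ['input_ids', 'attention_mask', 'pixel_values']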
| 172 | 1 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        """simple docstring"""
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        """simple docstring"""
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        """simple docstring"""
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        """simple docstring"""
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )['hidden_states'][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1E-3))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
    def test_save_load_fast_init_from_base(self):
        """simple docstring"""
        pass
    @parameterized.expand([('linear',), ('dynamic',)])
    def test_model_rope_scaling(self, scaling_type):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1E-5))
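# Sketch of driving a single check outside the unittest runner. The tester's
# `parent` only needs assert* methods; instantiating a bare TestCase for that
# purpose is an assumption, not something this file does:
# tester = OpenLlamaModelTester(parent=unittest.TestCase())
# tester.create_and_check_model(*tester.prepare_config_and_inputs())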
| 352 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
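    # A quick sanity check of the operations above using De Morgan's law,
    # 1 - max(A, B) == min(1 - A, 1 - B). This assumes fuzz.fuzzy_not returns the
    # complemented membership array, as it does for complement_a above.
    assert np.allclose(
        fuzz.fuzzy_not(union),
        fuzz.fuzzy_and(X, fuzz.fuzzy_not(young), X, fuzz.fuzzy_not(middle_aged))[1],
    )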
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 309 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    '''simple docstring'''
    def setUp(self):
        """simple docstring"""
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_feature_extractor_from_model_shortcut(self):
        """simple docstring"""
        config = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h')
        self.assertIsInstance(config, WavaVecaFeatureExtractor)
    def test_feature_extractor_from_local_directory_from_key(self):
        """simple docstring"""
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, WavaVecaFeatureExtractor)
    def test_feature_extractor_from_local_directory_from_config(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop('feature_extractor_type')
            config = WavaVecaFeatureExtractor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            config = AutoFeatureExtractor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue('_processor_class' not in dict_as_saved)
            self.assertIsInstance(config, WavaVecaFeatureExtractor)
    def test_feature_extractor_from_local_file(self):
        """simple docstring"""
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, WavaVecaFeatureExtractor)
    def test_repo_not_found(self):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier' ):
            _ = AutoFeatureExtractor.from_pretrained('bert-base')
    def test_revision_not_found(self):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError, r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')
    def test_feature_extractor_is_not_found(self):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError, 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.', ):
            _ = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model')
    def test_from_pretrained_dynamic_feature_extractor(self):
        """simple docstring"""
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor')
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=False)
        feature_extractor = AutoFeatureExtractor.from_pretrained(
            'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=True)
        self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor')
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, 'NewFeatureExtractor')
    def test_new_feature_extractor_registration(self):
        """simple docstring"""
        try:
            AutoConfig.register('custom', CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(WavaVecaConfig, WavaVecaFeatureExtractor)
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        """simple docstring"""
        class NewFeatureExtractor(CustomFeatureExtractor):
            '''simple docstring'''
            is_local = True
        try:
            AutoConfig.register('custom', CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor')
            self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor')
            self.assertTrue(feature_extractor.is_local)
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=False)
            self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor')
            self.assertTrue(feature_extractor.is_local)
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=True)
            self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor')
            self.assertTrue(not hasattr(feature_extractor, 'is_local'))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
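# The registration pattern exercised above in a nutshell, reusing the same
# CustomConfig / CustomFeatureExtractor pair (the checkpoint path is hypothetical):
# AutoConfig.register('custom', CustomConfig)
# AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
# feature_extractor = AutoFeatureExtractor.from_pretrained('/path/to/custom/checkpoint')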
| 204 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
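# With the lazy structure above, `from transformers.models.convbert import ConvBertModel`
# only triggers the heavy torch-dependent import on first attribute access; that
# behaviour comes from _LazyModule itself, not from anything configured in this file.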
| 204 | 1 |
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    '''simple docstring'''
    csv_file: str = field(
        metadata={'help': 'The csv file to plot.'}, )
    plot_along_batch: bool = field(
        default=False, metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'}, )
    is_time: bool = field(
        default=False, metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'}, )
    no_log_scale: bool = field(
        default=False, metadata={'help': 'Disable logarithmic scale when plotting'}, )
    is_train: bool = field(
        default=False, metadata={
            'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
        }, )
    figure_png_file: Optional[str] = field(
        default=None, metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'}, )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False
def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    '''simple docstring'''
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})
        with open(self.args.csv_file, newline='') as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row['model']
                self.result_dict[model_name]["bsz"].append(int(row['batch_size']))
                self.result_dict[model_name]["seq_len"].append(int(row['sequence_length']))
                if can_convert_to_int(row['result']):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row['batch_size']), int(row['sequence_length']))
                    ] = int(row['result'])
                elif can_convert_to_float(row['result']):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row['batch_size']), int(row['sequence_length']))
                    ] = float(row['result'])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = 'Time usage' if self.args.is_time else 'Memory usage'
        title_str = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale('log')
            ax.set_yscale('log')
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())
        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]['bsz']))
            sequence_lengths = sorted(set(self.result_dict[model_name]['seq_len']))
            results = self.result_dict[model_name]['result']
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results], dtype=int, )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results], dtype=np.float32, )
                (x_axis_label, inner_loop_label) = (
                    ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
                )
                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}" )
                plt.plot(x_axis_array, y_axis_array, '--')
                title_str += f" {label_model_name} vs."
        title_str = title_str[:-4]
        y_axis_label = 'Time in s' if self.args.is_time else 'Memory in MB'
        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
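# Example invocation of this script (file name and flag combination illustrative):
#   python plot_csv_file.py --csv_file inference_results.csv --figure_png_file plot.png --is_time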
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main() | 368 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
    '''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mvp_fast'''] = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mvp'''] = [
        '''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MvpForCausalLM''',
        '''MvpForConditionalGeneration''',
        '''MvpForQuestionAnswering''',
        '''MvpForSequenceClassification''',
        '''MvpModel''',
        '''MvpPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 188 | 0 |
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1_3_3_7 , num_examples=4_2 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict) == len(split_dict_yaml_list)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
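# Illustrative round-trip for the "train" case above (exact YAML field order is an
# assumption, inferred from the SplitInfo constructor, not checked in this test):
# SplitDict({'train': SplitInfo(name='train', num_bytes=1337, num_examples=42)})._to_yaml_list()
# -> [{'name': 'train', 'num_bytes': 1337, 'num_examples': 42}]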
@pytest.mark.parametrize(
    'split_info', [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name='my_dataset')] )
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({'train': split_info}) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name | 126 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ] )
    def get_dummy_components(self):
        """simple docstring"""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=(32, 64), class_embed_type='simple_projection', projection_class_embeddings_input_dim=32, class_embeddings_concat=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, projection_dim=32, )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta', model_max_length=77)
        vocoder_config = SpeechTaHifiGanConfig(
            model_in_dim=8, sampling_rate=16_000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, )
        vocoder = SpeechTaHifiGan(vocoder_config)
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'vocoder': vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A hammer hitting a wooden surface',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033])
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        """simple docstring"""
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs['prompt'] = 3 * [inputs['prompt']]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop('prompt')]
        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding='max_length', max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors='pt', )
        text_inputs = text_inputs['input_ids'].to(torch_device)
        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs, )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)
        inputs['prompt_embeds'] = prompt_embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        """simple docstring"""
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop('prompt')]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding='max_length', max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors='pt', )
            text_inputs = text_inputs['input_ids'].to(torch_device)
            text_embeds = audioldm_pipe.text_encoder(
                text_inputs, )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)
            embeds.append(text_embeds)
        inputs['prompt_embeds'], inputs['negative_prompt_embeds'] = embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = 'egg cracking'
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032])
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = 'A hammer hitting a wooden surface'
        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)
        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)
        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)
        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016
        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        """simple docstring"""
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = ['hey']
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)
        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechTaHifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        """simple docstring"""
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)
    def test_inference_batch_single_identical(self):
        """simple docstring"""
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    """simple docstring"""
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """simple docstring"""
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'A hammer hitting a wooden surface',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 2.5,
        }
        return inputs
    def test_audioldm(self):
        """simple docstring"""
        audioldm_pipe = AudioLDMPipeline.from_pretrained('cvssp/audioldm')
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        inputs['num_inference_steps'] = 25
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81_920
        audio_slice = audio[77_230:77_240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2
    def test_audioldm_lms(self):
        """simple docstring"""
        audioldm_pipe = AudioLDMPipeline.from_pretrained('cvssp/audioldm')
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81_920
        audio_slice = audio[27_780:27_790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2 | 126 | 1 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)
        mask_token = AddedToken('<mask>', lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token})
        tokenizer.add_tokens(['<ctc_blank>'])
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = 'this is a test'
        output_text = 'this is a test'
        return input_text, output_text
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        '''simple docstring'''
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-4], 'œ')
        self.assertEqual(vocab_keys[-2], '<mask>')
        self.assertEqual(vocab_keys[-1], '<ctc_blank>')
        self.assertEqual(len(vocab_keys), 81)
    def test_vocab_size(self):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)
                self.assertNotEqual(vocab_size, 0)
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ['aaaaa bbbbbb', 'cccccccccdddddddd']
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)
                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))
                tokens = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l', add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                new_toks_2 = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)
                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
                tokens = tokenizer.encode(
                    '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l', add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        '''simple docstring'''
        pass
    def test_subword_regularization_tokenizer(self):
        '''simple docstring'''
        pass
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize('This is a test')
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6], )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
@slow
    def test_tokenizer_integration(self):
        '''simple docstring'''
        sequences = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
        expected_encoding = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| 358 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
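
# Note on the pattern above (a sketch, not part of the original file): once the
# _LazyModule is installed into sys.modules, a statement such as
#     from transformers.models.efficientnet import EfficientNetModel
# only triggers the heavy torch-side import on first attribute access, so
# importing the package stays cheap when optional backends are absent.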
| 114 | 0 |
"""simple docstring"""
def _snake_case ( lowerCamelCase__ : str , lowerCamelCase__ : str ) -> float:
def get_matched_characters(lowerCamelCase__ : str , lowerCamelCase__ : str ) -> str:
lowerCamelCase_ : Optional[int] =[]
lowerCamelCase_ : Optional[int] =min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
lowerCamelCase_ : Union[str, Any] =int(max(0 , i - limit ) )
lowerCamelCase_ : str =int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(lowerCamelCase__ )
lowerCamelCase_ : int =F"""{_stra[0:_stra.index(lowerCamelCase__ )]} {_stra[_stra.index(lowerCamelCase__ ) + 1:]}"""
return "".join(lowerCamelCase__ )
# matching characters
lowerCamelCase_ : Optional[int] =get_matched_characters(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ : Optional[Any] =get_matched_characters(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ : Optional[int] =len(lowerCamelCase__ )
# transposition
lowerCamelCase_ : Any =(
len([(ca, ca) for ca, ca in zip(lowerCamelCase__ , lowerCamelCase__ ) if ca != ca] ) // 2
)
if not match_count:
lowerCamelCase_ : Tuple =0.0
else:
lowerCamelCase_ : int =(
1
/ 3
* (
match_count / len(lowerCamelCase__ )
+ match_count / len(lowerCamelCase__ )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
lowerCamelCase_ : str =0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
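    # Sanity checks, hand-derived from the formula above: identical strings
    # score 1.0, and the textbook "martha"/"marhta" pair (jaro = 17/18,
    # prefix length 3) works out to ~0.9611 with the 0.1 prefix weight.
    assert jaro_winkler("hello", "hello") == 1.0
    assert abs(jaro_winkler("martha", "marhta") - 0.9611) < 1e-3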
| 144 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def _snake_case ( lowerCamelCase__ : float , lowerCamelCase__ : float ) -> tuple:
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
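    # Worked example for the formula above: with L = 10 mH and C = 10 uF,
    # sqrt(1e-2 * 1e-5) = sqrt(1e-7) ~= 3.162e-4, so f ~= 1 / (2*pi*3.162e-4) ~= 503 Hz.
    print(resonant_frequency(10e-3, 10e-6))  # ('Resonant frequency', ~503.29)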
| 144 | 1 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 358 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    """Cleans a model-doc table of content: collapse duplicate entries and sort titles alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
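
# Illustrative behavior of the function above: duplicate "local" entries with a
# single shared title collapse to one entry, and the result is title-sorted:
#   clean_model_doc_toc([
#       {"local": "bert", "title": "BERT"},
#       {"local": "albert", "title": "ALBERT"},
#       {"local": "bert", "title": "BERT"},
#   ])
#   -> [{"local": "albert", "title": "ALBERT"}, {"local": "bert", "title": "BERT"}]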
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
| 293 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
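
# Minimal usage sketch: instantiating with the defaults above yields a
# 24-layer, 1024-hidden configuration; any keyword overrides a single field.
#   config = BertGenerationConfig(hidden_size=512)
#   config.num_hidden_layers  # -> 24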
| 33 |
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune in place so os.walk skips helper/hidden directories
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
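    # Example of the markdown this emits: for a file sorts/bubble_sort.py the
    # script prints a "## Sorts" heading followed by
    #   "  * [Bubble Sort](sorts/bubble_sort.py)".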
| 106 | 0 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 370 |
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        """
        Arguments:
            num_of_nodes - the number of nodes in the graph
        Attributes:
            m_num_of_nodes - the number of nodes in the graph
            m_edges - the list of edges
            m_component - a dict mapping each node to the index of its component
        """
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first, second, edge weight]."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Finds the root of the component a given node belongs to."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Propagates a new component root throughout the mapping."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Compares component sizes and attaches the smaller component to the larger one."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Boruvka's algorithm to find the minimum spanning tree."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes

        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """Doctest hook; the body of this test was elided in the source snippet."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
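    # Minimal usage sketch with the Graph class above: Boruvka repeatedly adds
    # each component's cheapest outgoing edge until one component remains.
    g = Graph(4)
    g.add_edge(0, 1, 10)
    g.add_edge(0, 2, 6)
    g.add_edge(0, 3, 5)
    g.add_edge(1, 3, 15)
    g.add_edge(2, 3, 4)
    g.boruvka()  # MST edges (2,3), (0,3), (0,1) give total weight 4 + 5 + 10 = 19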
| 273 | 0 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
    from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_a = {"input_ids": [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_a,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 123 | 0 |
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"

_DESCRIPTION = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"

_KWARGS_DESCRIPTION = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lxmert"] = [
        "LxmertEncoder",
        "LxmertForPreTraining",
        "LxmertForQuestionAnswering",
        "LxmertModel",
        "LxmertPreTrainedModel",
        "LxmertVisualFeatureEncoder",
        "LxmertXLayer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_lxmert"] = [
        "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLxmertForPreTraining",
        "TFLxmertMainLayer",
        "TFLxmertModel",
        "TFLxmertPreTrainedModel",
        "TFLxmertVisualFeatureEncoder",
    ]

if TYPE_CHECKING:
    from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
    from .tokenization_lxmert import LxmertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_lxmert_fast import LxmertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lxmert import (
            LxmertEncoder,
            LxmertForPreTraining,
            LxmertForQuestionAnswering,
            LxmertModel,
            LxmertPreTrainedModel,
            LxmertVisualFeatureEncoder,
            LxmertXLayer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_lxmert import (
            TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLxmertForPreTraining,
            TFLxmertMainLayer,
            TFLxmertModel,
            TFLxmertPreTrainedModel,
            TFLxmertVisualFeatureEncoder,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from transformers import DonutProcessor
CHECKPOINT = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(CHECKPOINT)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )

        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
| 53 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    """Gives new context after removing <html> tokens and remaps answer indices accordingly."""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    # overlap between consecutive chunks will be doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        # this won't always match exactly because of extra gaps => visually inspect the prints
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            # checking that the strided slices still contain the original answer
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example


def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # remove 60% of the no-answer samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
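
# Layout sketch of the jsonl written above (one object per line); a hypothetical
# reader for spot-checking the output could look like:
#   with jsonlines.open("nq-training.jsonl") as reader:
#       for record in reader:
#           # keys: "input_ids", "start_token", "end_token", "category"
#           ...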
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
__snake_case : Optional[int] =load_dataset('natural_questions')
__snake_case : Union[str, Any] =BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
__snake_case : Tuple =data['train' if PROCESS_TRAIN == 'true' else 'validation']
__snake_case : Optional[int] ={
'tokenizer': tokenizer,
'doc_stride': DOC_STRIDE,
'max_length': MAX_LENGTH,
'assertion': False,
}
__snake_case : Dict =data.map(prepare_inputs, fn_kwargs=fn_kwargs)
__snake_case : Dict =data.remove_columns(['annotations', 'document', 'id', 'question'])
print(data)
np.random.seed(SEED)
__snake_case : int ='nq-training.jsonl' if PROCESS_TRAIN == 'true' else 'nq-validation.jsonl'
save_to_disk(data, file_name=cache_file_name)
| 129 | 0 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
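
# Shape sketch for reshape_weight_for_sd above: an HF nn.Linear weight of shape
# (out_features, in_features) becomes a 1x1-conv weight of shape
# (out_features, in_features, 1, 1), matching the SD VAE attention layout.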
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
UpperCAmelCase = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
UpperCAmelCase = re.compile("""|""".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
UpperCAmelCase = {"""q""": 0, """k""": 1, """v""": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
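# SD v1.x text encoders already use CLIP's key names, so the v1 conversion below is a passthrough.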
def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt."""
)
UpperCAmelCase = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
UpperCAmelCase = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""")
UpperCAmelCase = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""")
UpperCAmelCase = osp.join(args.model_path, """text_encoder""", """model.safetensors""")
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
UpperCAmelCase = load_file(unet_path, device="""cpu""")
else:
UpperCAmelCase = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""")
UpperCAmelCase = torch.load(unet_path, map_location="""cpu""")
if osp.exists(vae_path):
UpperCAmelCase = load_file(vae_path, device="""cpu""")
else:
UpperCAmelCase = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""")
UpperCAmelCase = torch.load(vae_path, map_location="""cpu""")
if osp.exists(text_enc_path):
UpperCAmelCase = load_file(text_enc_path, device="""cpu""")
else:
UpperCAmelCase = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""")
UpperCAmelCase = torch.load(text_enc_path, map_location="""cpu""")
# Convert the UNet model
UpperCAmelCase = convert_unet_state_dict(unet_state_dict)
UpperCAmelCase = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
UpperCAmelCase = convert_vae_state_dict(vae_state_dict)
UpperCAmelCase = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
UpperCAmelCase = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
UpperCAmelCase = {"""transformer.""" + k: v for k, v in text_enc_dict.items()}
UpperCAmelCase = convert_text_enc_state_dict_vaa(text_enc_dict)
UpperCAmelCase = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()}
else:
UpperCAmelCase = convert_text_enc_state_dict(text_enc_dict)
UpperCAmelCase = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
UpperCAmelCase = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
UpperCAmelCase = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
UpperCAmelCase = {"""state_dict""": state_dict}
torch.save(state_dict, args.checkpoint_path)
| 353 | """simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
UpperCAmelCase = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
"""
UpperCAmelCase = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""
UpperCAmelCase = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"pearson\": Pearson Correlation
\"spearmanr\": Spearman Correlation
\"matthews_correlation\": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'stsb')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})
{'pearson': 1.0, 'spearmanr': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'cola')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
| 54 | 0 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
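# Shared fixtures for the tests below: img is the BGR test image, gray its grayscale conversion.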
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
| 93 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__(self):
        self.connections = {}

    def add_node(self, node):
        self.connections[node] = {}

    def add_transition_probability(self, node1, node2, probability):
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self):
        return list(self.connections)

    def transition(self, node):
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int):
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
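# Example (hypothetical chain; counts are approximate since transitions are sampled):
#   transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
#   get_transitions("a", transitions, 1000)  # visits "a" roughly five times as often as "b"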
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: str =logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
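# prepare_img() fetches the standard COCO "two cats" photo used across HF conversion scripts as a sanity-check input.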
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Any =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE_: int =parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 361 | '''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
@slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]], dtype=tf.int32
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]], dtype=tf.float32
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 106 | 0 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[str] = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
SCREAMING_SNAKE_CASE : str = F'https://www.google.com/search?q={query}&num=100'
SCREAMING_SNAKE_CASE : Union[str, Any] = requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
try:
SCREAMING_SNAKE_CASE : List[Any] = (
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
SCREAMING_SNAKE_CASE : Optional[int] = parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
webbrowser.open(link)
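# Illustrative usage: python3 <this script> open source search  -> opens the top Google result in a browser.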
| 102 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)
@property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
        model = PriorTransformer(**model_kwargs)
return model
@property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs)
return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_inference_batch_consistent(self):
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy" )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark", generator=generator, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type="np", ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 102 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
a : Optional[int] = logging.get_logger(__name__)
a : Union[str, Any] = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
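    # attribute_map lets generic HF utilities read num_attention_heads / hidden_size through Marian's own field names.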
    def __init__(self, vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=58100, scale_embedding=False, pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ] )
        return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ):
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1 )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ):
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 361 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
UpperCAmelCase_: Tuple = True
UpperCAmelCase_: List[Any] = OpenLlamaModel(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: Any = model(
SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_, encoder_attention_mask=SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: Optional[int] = model(
SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: str = model(SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
UpperCAmelCase_: Any = OpenLlamaForCausalLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: Union[str, Any] = model(SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
UpperCAmelCase_: Tuple = True
UpperCAmelCase_: Optional[int] = True
UpperCAmelCase_: Dict = OpenLlamaForCausalLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
# first forward pass
UpperCAmelCase_: str = model(
SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_, encoder_attention_mask=SCREAMING_SNAKE_CASE_, use_cache=SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: Tuple = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase_: Tuple = ids_tensor((self.batch_size, 3), config.vocab_size )
UpperCAmelCase_: Optional[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
UpperCAmelCase_: str = torch.cat([input_ids, next_tokens], dim=-1 )
UpperCAmelCase_: str = torch.cat([input_mask, next_mask], dim=-1 )
UpperCAmelCase_: Dict = model(
SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_, encoder_attention_mask=SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_, )["""hidden_states"""][0]
UpperCAmelCase_: Tuple = model(
SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_, encoder_attention_mask=SCREAMING_SNAKE_CASE_, past_key_values=SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_, )["""hidden_states"""][0]
# select random slice
UpperCAmelCase_: str = ids_tensor((1,), output_from_past.shape[-1] ).item()
UpperCAmelCase_: str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_: Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
    def test_save_load_fast_init_from_base(self):
        pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 82 | 0 |
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image( image_size , device ):
    url = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073) , (0.26862954, 0.26130258, 0.27577711) ),
        ] )
    image = transform(image ).unsqueeze(0 ).to(device )
    return image
def rename_key( key ):
    if "visual_encoder" in key:
        key = re.sub("""visual_encoder*""" , """vision_model.encoder""" , key )
    if "blocks" in key:
        key = re.sub(R"""blocks""" , """layers""" , key )
    if "attn" in key:
        key = re.sub(R"""attn""" , """self_attn""" , key )
    if "norm1" in key:
        key = re.sub(R"""norm1""" , """layer_norm1""" , key )
    if "norm2" in key:
        key = re.sub(R"""norm2""" , """layer_norm2""" , key )
    if "encoder.norm" in key:
        key = re.sub(R"""encoder.norm""" , """post_layernorm""" , key )
    if "encoder.patch_embed.proj" in key:
        key = re.sub(R"""encoder.patch_embed.proj""" , """embeddings.patch_embedding""" , key )
    if "encoder.pos_embed" in key:
        key = re.sub(R"""encoder.pos_embed""" , """embeddings.position_embedding""" , key )
    if "encoder.cls_token" in key:
        key = re.sub(R"""encoder.cls_token""" , """embeddings.class_embedding""" , key )
    if "self_attn" in key:
        key = re.sub(R"""self_attn.proj""" , """self_attn.projection""" , key )
    return key
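# Example of the mapping above on a BLIP key (the substitutions apply in
# sequence, so "attn" is first widened to "self_attn" and only then is
# ".proj" expanded to ".projection"):
#   rename_key("visual_encoder.blocks.0.attn.proj.weight")
#   -> "vision_model.encoder.layers.0.self_attn.projection.weight"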
@torch.no_grad()
def convert_blip_checkpoint( pytorch_dump_folder_path , config_path=None ):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path )
    else:
        config = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
    hf_model = BlipForConditionalGeneration(config ).eval()
    model_url = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"""
    pt_model = blip_decoder(pretrained=model_url , image_size=384 , vit="""base""" )
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict )
    image_size = 384
    image = load_demo_image(image_size=image_size , device="""cpu""" )
    tokenizer = BertTokenizer.from_pretrained("""bert-base-uncased""" )
    input_ids = tokenizer(["""a picture of"""] ).input_ids
    out = hf_model.generate(image , input_ids )
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    out = hf_model.generate(image )
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path )
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"""
    )
    vqa_model = blip_vqa(pretrained=model_url , image_size=image_size , vit="""base""" )
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config )
    hf_vqa_model.load_state_dict(modified_state_dict )
    question = ["""How many dogs are in this image?"""]
    question_input_ids = tokenizer(question , return_tensors="""pt""" ).input_ids
    answer = hf_vqa_model.generate(question_input_ids , image )
    print(tokenizer.decode(answer[0] ) )
    assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + """_vqa""" )
    model_url = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"""
    itm_model = blip_itm(pretrained=model_url , image_size=image_size , vit="""base""" )
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config )
    question = ["""A picture of a woman with a dog sitting in a beach"""]
    question_input_ids = tokenizer(
        question , return_tensors="""pt""" , padding="""max_length""" , truncation=True , max_length=35 , ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict )
    hf_itm_model.eval()
    out_itm = hf_itm_model(question_input_ids , image , use_itm_head=True )
    out = hf_itm_model(question_input_ids , image , use_itm_head=False )
    assert out[0].item() == 0.21106874942779541
    assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45698845386505127
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + """_itm""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
SCREAMING_SNAKE_CASE_ : int = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
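# Example invocation (the script name and paths are placeholders):
#   python convert_blip_original_pytorch_to_hf.py \
#       --pytorch_dump_folder_path ./blip-base-dump --config_path ./blip_config.json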
| 335 |
"""simple docstring"""
def compute_ap( graph ):
    n = len(graph )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count

    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
SCREAMING_SNAKE_CASE_ : Optional[int] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
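# The articulation points of the graph above are 2, 3 and 5: removing 2
# separates {0, 1} from {3, ..., 8}, removing 3 isolates 4, and removing 5
# cuts the cycle {6, 7, 8} off from the rest of the graph.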
| 335 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vit_msn"""] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
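# Usage sketch of the lazy-module pattern above (illustrative import paths):
# touching a config symbol stays cheap, while touching a model class is what
# actually triggers the heavy torch-backed module import.
#   from transformers.models.vit_msn import ViTMSNConfig  # no torch import yet
#   from transformers.models.vit_msn import ViTMSNModel   # imports modeling_vit_msn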
| 225 |
import enum
import shutil
import sys
TERMINAL_WIDTH , _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class Direction(enum.Enum ):
    UP = 0
    DOWN = 1
def forceWrite(content , end="" ):
    sys.stdout.write(str(content ) + end )
    sys.stdout.flush()
def writeColor(content , color , end="" ):
    forceWrite(F'\u001b[{color}m{content}\u001b[0m' , end )
def reset_cursor():
    forceWrite('\r' )
def move_cursor(num_lines , direction ):
    forceWrite(F'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}' )
def clear_line():
    forceWrite(' ' * TERMINAL_WIDTH )
    reset_cursor()
def linebreak():
    reset_cursor()
    forceWrite('-' * TERMINAL_WIDTH )
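# Minimal usage sketch of the helpers above: repaint one status line in place
# instead of scrolling the terminal (the loop and message are illustrative).
if __name__ == "__main__":
    import time

    for pct in range(0, 101, 25):
        clear_line()
        forceWrite(F'progress: {pct}%')
        time.sleep(0.1)
    forceWrite('\n')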
| 225 | 1 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv('''sample_data.csv''', header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss='''mean_squared_error''', optimizer='''adam''')
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    prediction = model.predict(x_test)
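    # Note: `prediction` is still in the scaled [0, 1] range. To read it in the
    # original price units, keep the fitted scaler in a variable (an assumption,
    # since the script above fits it inline) and invert the transform:
    #   scaler = MinMaxScaler()
    #   actual_data = scaler.fit_transform(actual_data)
    #   ...
    #   prediction_prices = scaler.inverse_transform(prediction)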
| 272 | '''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__lowercase = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor ):
    def __init__(self , *args , **kwargs ):
        warnings.warn(
            """The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use DeformableDetrImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 272 | 1 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest ):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
    @slow
    def test_sequence_builders(self ):
        tokenizer = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
| 90 |
from math import pi, sqrt
def gamma( num : float ) -> float:
    if num <= 0:
        raise ValueError('''math domain error''' )
    if num > 171.5:
        raise OverflowError('''math range error''' )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError('''num must be an integer or a half-integer''' )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def test_gamma() -> None:
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
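# Worked example of the half-integer recurrence implemented above:
#   gamma(2.5) = 1.5 * gamma(1.5) = 1.5 * 0.5 * gamma(0.5)
#              = 0.75 * sqrt(pi) ≈ 1.3293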
if __name__ == "__main__":
from doctest import testmod
testmod()
lowercase__ =1.0
while num:
lowercase__ =float(input('Gamma of: '))
print(F"""gamma({num}) = {gamma(num)}""")
print('\nEnter 0 to exit...')
| 90 | 1 |
'''simple docstring'''
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs( gen_kwargs ) -> int:
    lists_lengths = {key: len(value ) for key, value in gen_kwargs.items() if isinstance(value , list )}
    if len(set(lists_lengths.values() ) ) > 1:
        raise RuntimeError(
            (
                'Sharding is ambiguous for this dataset: '
                + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
                + '\n'.join(f'\t- key {key} has length {length}' for key, length in lists_lengths.items() )
                + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
                + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
            ) )
    max_length = max(lists_lengths.values() , default=0 )
    return max(1 , max_length )
def _distribute_shards( num_shards , max_num_jobs ):
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs ):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start , start + num_shards_to_add )
        shards_indices_per_group.append(shard_indices )
    return shards_indices_per_group
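# Quick check of the helper above: five shards over two jobs hand the
# remainder to the earlier group, keeping the split as even as possible.
#   _distribute_shards(num_shards=5, max_num_jobs=2)  # [range(0, 3), range(3, 5)]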
def _split_gen_kwargs( gen_kwargs , max_num_jobs ):
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs )
    if num_shards == 1:
        return [dict(gen_kwargs )]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards , max_num_jobs=max_num_jobs )
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value , list )
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group ) )
        ]
def _merge_gen_kwargs( gen_kwargs_list ):
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key] , list )
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs( rng , gen_kwargs ):
    list_sizes = {len(value ) for value in gen_kwargs.values() if isinstance(value , list )}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs )
    for key, value in shuffled_kwargs.items():
        if isinstance(value , list ):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value )]]
    return shuffled_kwargs
| 83 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_( state_dict ) -> None:
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
        'decoder.output_projection.weight',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path , hf_config_path='facebook/mbart-large-en-ro' , finetuned=False , mbart_50=False ):
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path , vocab_size=vocab_size )
    if mbart_50 and finetuned:
        mbart_config.activation_function = 'relu'
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
return model
if __name__ == "__main__":
a__ : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
a__ : Union[str, Any] =parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
| 53 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 4 , max_size=32 * 6 , num_labels=4 , mask_feature_size=32 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_A )
__lowerCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_A )
__lowerCAmelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_A ) > 0.5
).float()
__lowerCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=_A ) > 0.5).long()
__lowerCAmelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = output.encoder_hidden_states
__lowerCAmelCase = output.pixel_decoder_hidden_states
__lowerCAmelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_A ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_A ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_A ) , config.decoder_config.decoder_layers )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A=False ):
"""simple docstring"""
with torch.no_grad():
__lowerCAmelCase = MaskFormerModel(config=_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(pixel_values=_A , pixel_mask=_A )
__lowerCAmelCase = model(_A , output_hidden_states=_A )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_A , _A )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = MaskFormerForInstanceSegmentation(config=_A )
model.to(_A )
model.eval()
def comm_check_on_output(_A ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__lowerCAmelCase = model(pixel_values=_A , pixel_mask=_A )
__lowerCAmelCase = model(_A )
comm_check_on_output(_A )
__lowerCAmelCase = model(
pixel_values=_A , pixel_mask=_A , mask_labels=_A , class_labels=_A )
comm_check_on_output(_A )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = MaskFormerModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_A , **_A , output_hidden_states=_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_A )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
__lowerCAmelCase = MaskFormerModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = (self.model_tester.min_size,) * 2
__lowerCAmelCase = {
"pixel_values": torch.randn((2, 3, *size) , device=_A ),
"mask_labels": torch.randn((2, 1_0, *size) , device=_A ),
"class_labels": torch.zeros(2 , 1_0 , device=_A ).long(),
}
__lowerCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_A )
__lowerCAmelCase = model(**_A )
self.assertTrue(outputs.loss is not None )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_A , **_A , output_hidden_states=_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A ).to(_A )
__lowerCAmelCase = model(**_A , output_attentions=_A )
self.assertTrue(outputs.attentions is not None )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
__lowerCAmelCase = self.all_model_classes[1]
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.train()
__lowerCAmelCase = model(_A , mask_labels=_A , class_labels=_A ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.all_model_classes[1]
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.train()
__lowerCAmelCase = model(_A , mask_labels=_A , class_labels=_A )
__lowerCAmelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__lowerCAmelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
__lowerCAmelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__lowerCAmelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_A )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCamelCase__ = 1E-4
def _a ( ):
__lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase ):
@cached_property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(_A )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(_A , return_tensors="pt" ).to(_A )
__lowerCAmelCase = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(_A , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
__lowerCAmelCase = model(**_A )
__lowerCAmelCase = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _A , atol=_A ) )
__lowerCAmelCase = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _A , atol=_A ) )
__lowerCAmelCase = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _A , atol=_A ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(_A )
.eval()
)
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(_A , return_tensors="pt" ).to(_A )
__lowerCAmelCase = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(_A , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
__lowerCAmelCase = model(**_A )
# masks_queries_logits
__lowerCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__lowerCAmelCase = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
__lowerCAmelCase = torch.tensor(_A ).to(_A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _A , atol=_A ) )
# class_queries_logits
__lowerCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__lowerCAmelCase = torch.tensor(
[
[1.6_5_1_2E0_0, -5.2_5_7_2E0_0, -3.3_5_1_9E0_0],
[3.6_1_6_9E-0_2, -5.9_0_2_5E0_0, -2.9_3_1_3E0_0],
[1.0_7_6_6E-0_4, -7.7_6_3_0E0_0, -5.1_2_6_3E0_0],
] ).to(_A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _A , atol=_A ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(_A )
.eval()
)
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(_A , return_tensors="pt" ).to(_A )
__lowerCAmelCase = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(_A , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
__lowerCAmelCase = model(**_A )
# masks_queries_logits
__lowerCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__lowerCAmelCase = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
__lowerCAmelCase = torch.tensor(_A ).to(_A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _A , atol=_A ) )
# class_queries_logits
__lowerCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__lowerCAmelCase = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(_A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _A , atol=_A ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(_A )
.eval()
)
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="pt" , )
__lowerCAmelCase = inputs["pixel_values"].to(_A )
__lowerCAmelCase = [el.to(_A ) for el in inputs["mask_labels"]]
__lowerCAmelCase = [el.to(_A ) for el in inputs["class_labels"]]
with torch.no_grad():
__lowerCAmelCase = model(**_A )
self.assertTrue(outputs.loss is not None )
| 102 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
UpperCamelCase__ = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
UpperCamelCase__ = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
UpperCamelCase__ = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(
    key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )
    sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span )
    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            F"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
        logger.info(
            "Number of resulting singleton clusters in the key "
            F"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
    if not keep_singletons:
        logger.info(
            F"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
            "files, respectively" )
    return doc_coref_infos
def evaluate( key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ):
    doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall , precision , f1 = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({F"""{name}/recall""": recall, F"""{name}/precision""": precision, F"""{name}/f1""": f1} )
        logger.info(
            name.ljust(10 ) , F"""Recall: {recall * 100:.2f}""" , F""" Precision: {precision * 100:.2f}""" , F""" F1: {f1 * 100:.2f}""" , )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(F"""CoNLL score: {conll:.2f}""" )
        output_scores.update({"conll_score": conll} )
    return output_scores
def check_gold_parse_annotation( key_lines ):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#" ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric ):
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Sequence(datasets.Value("string" ) ),
} ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[
"https://github.com/ns-moosavi/coval",
"https://www.aclweb.org/anthology/P16-1060",
"http://www.conll.cemantix.org/2012/data.html",
] , )
    def _compute(self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'." )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
        return score
| 102 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_owlvit'] = ['OwlViTFeatureExtractor']
    _import_structure['image_processing_owlvit'] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_owlvit'] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 284 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model ):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
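# Quick sanity check (illustrative): a bias-free nn.Linear(10, 4) holds a
# single 4x10 weight matrix, so the helper should report 40 parameters.
#   import torch.nn as nn
#   assert count_trainable_parameters(nn.Linear(10, 4, bias=False)) == 40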
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir , metric ):
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            F"""seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to this"""
            ' function.' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=F"""val_{metric}""", mode='max', save_top_k=3, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback(metric , patience ):
    return EarlyStopping(
        monitor=F"""val_{metric}""", mode='min' if 'loss' in metric else 'max', patience=patience, verbose=True, )
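# Usage sketch (the output path and metric are placeholders): wire both
# callbacks into a pl.Trainer so checkpointing and early stopping track the
# same validation metric.
#   callbacks = [
#       get_checkpoint_callback("outputs/", metric="rouge2"),
#       get_early_stopping_callback(metric="rouge2", patience=3),
#   ]
#   trainer = pl.Trainer(callbacks=callbacks)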
class Seq2SeqLoggingCallback(pl.Callback ):
    def on_batch_end(self , trainer , pl_module ):
        lrs = {f"""lr_group_{i}""": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
    @rank_zero_only
    def _write_logs(self , trainer: pl.Trainer , pl_module: pl.LightningModule , type_path: str , save_generations=True ) -> None:
        logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
            generations_file = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , 'a+' ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = f"""{key}: {val:.6f}\n"""
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = '\n'.join(metrics['preds'] )
            generations_file.open('w+' ).write(content )
    @rank_zero_only
    def on_train_start(self , trainer , pl_module ):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6} )
    @rank_zero_only
    def on_test_end(self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , 'test' )
    @rank_zero_only
    def on_validation_end(self , trainer: pl.Trainer , pl_module ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 284 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : List[Any] = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Any = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 358 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMT5ModelTester:
    def __init__( self , parent , vocab_size=99 , batch_size=13 , encoder_seq_length=7 , decoder_seq_length=9 , is_training=True , use_attention_mask=True , use_labels=False , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , d_ff=37 , relative_attention_num_buckets=8 , dropout_rate=0.1 , initializer_factor=0.002 , eos_token_id=1 , pad_token_id=0 , decoder_start_token_id=0 , scope=None , decoder_layers=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return TaConfig.from_pretrained("google/umt5-base")
    def prepare_inputs_dict(self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return TaConfig(
            vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def get_config(self):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ):
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create a hypothetical next token and extend next_input_ids with it
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fpaa_forward(self, config, input_dict, ):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
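The slice comparison in create_and_check_decoder_model_past works because, with a key/value cache, only the newest token is fed forward, so position 0 of the cached output lines up with position -1 of the uncached full-sequence output. The index alignment in isolation, as a toy numpy sketch (no real model involved):

import numpy as np

full_sequence = np.arange(6)         # tokens 0..5 already include the new token
outputs_full = full_sequence * 10.0  # pretend per-position hidden states
outputs_cached = outputs_full[-1:]   # with a cache only the new token is recomputed

# position 0 of the cached run corresponds to position -1 of the full run
assert np.allclose(outputs_cached[0], outputs_full[-1])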
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMTaModelTester(self)
    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f"{tmpdirname}/t5_test.onnx", export_params=True, opset_version=9, input_names=["input_ids", "decoder_input_ids"], )
    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fpaa_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs)
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask is specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)
            out = model.generate(
                config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaIntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged")
    def test_small_integration_test(self):
        model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ])
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)
        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 339 | 0 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"\n    run_eval_search.py\n    {model}\n    {input_file_name}\n    {output_file_name}\n    --score_path {score_path}\n    --task {task}\n    --num_beams 2\n    --length_penalty 2.0\n    ".split()
        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(score_path).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"\n    run_eval_search.py\n    {model}\n    {str(input_file_name)}\n    {str(output_file_name)}\n    --score_path {score_path}\n    --reference_path {reference_path}\n    --task {task}\n    ".split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])
        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(score_path).exists()
            os.remove(Path(score_path))
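Both tests drive the scripts' entry points by patching `sys.argv` before calling them. The pattern in isolation, with a made-up `cli_main` standing in for run_generate()/run_search():

import sys
from unittest.mock import patch


def cli_main():
    # a stand-in CLI entry point that reads its own argv
    return f"model={sys.argv[1]}, task={sys.argv[3]}"


testargs = ["run_eval.py", "sshleifer/bart-tiny-random", "--task", "summarization"]
with patch.object(sys, "argv", testargs):
    assert cli_main() == "model=sshleifer/bart-tiny-random, task=summarization"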
| 47 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation
    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
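A hypothetical usage sketch of the tool above (it downloads the CIDAS/clipseg-rd64-refined checkpoint on first use; the file names here are illustrative only):

# from PIL import Image
#
# tool = ImageSegmentationTool()          # PipelineTool lazily loads processor + model
# image = Image.open("cat_and_dog.png")   # illustrative input file
# mask = tool(image, "cat")               # __call__ chains encode -> forward -> decode
# mask.save("cat_mask.png")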
| 47 | 1 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"])
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict)
    model.save_pretrained(pytorch_dump_folder_path)
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
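The state-dict conversion above is just a prefix rewrite plus a filter for unused weights. The same logic on a toy dictionary (the keys here are invented for illustration):

toy_state_dict = {
    "roberta.embeddings.word_embeddings.weight": 1,
    "roberta.encoder.layer.0.attention.self.LayerNorm.weight": 2,  # unused -> dropped
    "lm_head.bias": 3,
}
converted = {}
for key, value in toy_state_dict.items():
    if key.startswith("roberta."):
        key = "roberta_prelayernorm." + key[len("roberta.") :]
    if key.endswith(".self.LayerNorm.weight") or key.endswith(".self.LayerNorm.bias"):
        continue  # the original checkpoint carries unused LayerNorm weights
    converted[key] = value
assert "lm_head.bias" in converted and len(converted) == 2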
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 333 | def get_min_or_max(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be (lower < higher)")
    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
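guess_the_number is a plain bisection: every probe halves the interval, so it needs roughly log2(higher - lower) probes. A non-interactive sanity check using get_avg from above (the helper below is added only for illustration and is never called automatically):

import math


def count_probes(lower: int, higher: int, to_guess: int) -> int:
    probes = 0
    while True:
        probes += 1
        number = get_avg(lower, higher)
        if number < to_guess:
            lower = number
        elif number > to_guess:
            higher = number
        else:
            return probes


# e.g. count_probes(0, 1024, 17) stays close to log2(1024) = 10 probes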
| 333 | 1 |
def apply_table(inp, table):
    # Apply a 1-indexed permutation table to the input bit-string.
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    # Rotate the bit-string one position to the left.
    return data[1:] + data[0]


def xor(a, b):
    # Bitwise XOR of two equal-length bit-strings.
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    # Row comes from the outer bits, column from the inner bits of the 4-bit block.
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    # One S-DES round: expand, XOR with the subkey, substitute, permute, mix.
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


key = input("Enter 10 bit key: ")
message = input("Enter 8 bit message: ")
p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
p4_table = [2, 4, 3, 1]
IP = [2, 6, 3, 1, 4, 8, 5, 7]
IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
expansion = [4, 1, 2, 3, 2, 3, 4, 1]
s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

# key generation
temp = apply_table(key, p10_table)
left = temp[:5]
right = temp[5:]
left = left_shift(left)
right = left_shift(right)
key1 = apply_table(left + right, p8_table)
left = left_shift(left)
right = left_shift(right)
left = left_shift(left)
right = left_shift(right)
key2 = apply_table(left + right, p8_table)

# encryption
temp = apply_table(message, IP)
temp = function(expansion, s0, s1, key1, temp)
temp = temp[4:] + temp[:4]
temp = function(expansion, s0, s1, key2, temp)
CT = apply_table(temp, IP_inv)
print("Cipher text is:", CT)

# decryption
temp = apply_table(CT, IP)
temp = function(expansion, s0, s1, key2, temp)
temp = temp[4:] + temp[:4]
temp = function(expansion, s0, s1, key1, temp)
PT = apply_table(temp, IP_inv)
print("Plain text after decrypting is:", PT)
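Because each round `function` only XORs the left half with a value derived from the untouched right half, applying the rounds with the subkeys in reverse order undoes encryption regardless of the S-box details. A self-contained round-trip check with a fixed key and message (no input() prompts; reuses the tables defined above):

def _sdes_roundtrip_demo() -> None:
    demo_key, demo_message = "1010000010", "11010111"
    temp = apply_table(demo_key, p10_table)
    left, right = left_shift(temp[:5]), left_shift(temp[5:])
    demo_key1 = apply_table(left + right, p8_table)
    left, right = left_shift(left_shift(left)), left_shift(left_shift(right))
    demo_key2 = apply_table(left + right, p8_table)

    ct = apply_table(demo_message, IP)
    ct = function(expansion, s0, s1, demo_key1, ct)
    ct = function(expansion, s0, s1, demo_key2, ct[4:] + ct[:4])
    ct = apply_table(ct, IP_inv)

    pt = apply_table(ct, IP)
    pt = function(expansion, s0, s1, demo_key2, pt)  # subkeys in reverse order
    pt = function(expansion, s0, s1, demo_key1, pt[4:] + pt[:4])
    pt = apply_table(pt, IP_inv)
    assert pt == demo_message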
| 130 | import argparse
import math
import os

import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", )
    parser.add_argument(
        "-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.", )
    parser.add_argument(
        "-n", "--images_num", type=int, default=4, help="How many images to generate.", )
    parser.add_argument(
        "-s", "--seed", type=int, default=42, help="Seed for random process.", )
    parser.add_argument(
        "-ci", "--cuda_id", type=int, default=0, help="cuda_id.", )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42, ):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt, ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
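image_grid places image i at column i % cols and row i // cols of a blank canvas. A quick sanity check with solid-color placeholders, wrapped in a helper (added for illustration; never called by the script):

def _image_grid_demo():
    from PIL import Image as PILImage

    demo_imgs = [PILImage.new("RGB", (64, 64), color) for color in ("red", "green", "blue", "white")]
    demo_grid = image_grid(demo_imgs, rows=2, cols=2)
    assert demo_grid.size == (128, 128)  # (cols * w, rows * h)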
args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1))) | 145 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True, ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8))
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass
    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (RGBA inputs are converted to RGB, hence 3 output channels)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
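Every shape assertion above reduces to (batch, channels, crop_height, crop_width). A minimal numpy center-crop that reproduces those dimensions (a sketch of the idea, not the transformers implementation):

import numpy as np


def center_crop_chw(image, crop_h, crop_w):
    # image is channels-first: (C, H, W)
    _, h, w = image.shape
    top = (h - crop_h) // 2
    left = (w - crop_w) // 2
    return image[:, top : top + crop_h, left : left + crop_w]


cropped = center_crop_chw(np.zeros((3, 400, 300)), 18, 18)
assert cropped.shape == (3, 18, 18)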
| 271 |
"""simple docstring"""
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy())

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"
    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"], return_stdout=True, )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", output, )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ], return_stdout=True, )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", output, )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True)
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all', output, )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"], return_stdout=True, )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", output, )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ], return_stdout=True, )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all', output, )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"], return_stdout=True, )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all', output, )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ], return_stdout=True, )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all', output, )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"], return_stdout=True, )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all', output, )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ], return_stdout=True, )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all', output, )
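A minimal stand-in for the `run_command(..., return_stdout=True)` helper these tests rely on (an assumption about the helper's behavior, not its actual implementation):

import subprocess
import sys


def run_command_sketch(cmd):
    # capture stdout as text and fail loudly on a non-zero exit code
    return subprocess.run(cmd, capture_output=True, text=True, check=True).stdout


assert run_command_sketch([sys.executable, "-c", "print('ok')"]).strip() == "ok"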
| 271 | 1 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
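The integration check compares only a 3x3 corner of the logits against hard-coded values within an absolute tolerance. The same pattern in plain numpy (values copied from the expected slice above; jnp.allclose behaves the same way):

import numpy as np

output = np.zeros((1, 6, 50000))
output[0, :3, :3] = [[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]
expected_slice = np.array([[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
assert np.allclose(output[:, :3, :3], expected_slice, atol=1e-4)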
| 209 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetVaImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs, ):
"""simple docstring"""
super().__init__(**_a )
lowerCamelCase = size if size is not None else {"""shortest_edge""": 256}
lowerCamelCase = get_size_dict(_a , default_to_square=_a )
lowerCamelCase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
lowerCamelCase = get_size_dict(_a , param_name="""crop_size""" )
lowerCamelCase = do_resize
lowerCamelCase = size
lowerCamelCase = resample
lowerCamelCase = do_center_crop
lowerCamelCase = crop_size
lowerCamelCase = do_rescale
lowerCamelCase = rescale_factor
lowerCamelCase = do_normalize
lowerCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs, ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
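A hedged usage sketch of post_process_semantic_segmentation (the processor/model names here are placeholders for any semantic-segmentation checkpoint; requires torch and a PIL image):

# processor = MobileNetVaImageProcessor()
# inputs = processor(images=pil_image, return_tensors="pt")
# outputs = model(**inputs)  # any model whose output exposes .logits of shape (B, C, H, W)
# maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[pil_image.size[::-1]])
# maps[0] is then an (H, W) tensor of per-pixel class ids at the original resolution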
| 291 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        return 32
    @property
    def time_input_dim( self ):
        return 32
    @property
    def block_out_channels_a( self ):
        return self.time_input_dim
    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        return 100
@property
    def dummy_unet( self ):
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[int] = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowerCAmelCase_ :Tuple = UNetaDConditionModel(**__A )
return model
@property
    def dummy_movq_kwargs( self ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
torch.manual_seed(0 )
lowerCAmelCase_ :List[str] = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components( self ):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="""epsilon""" , thresholding=False , )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create hint
        hint = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
lowerCAmelCase_ :str = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
    def test_kandinsky_controlnet( self ):
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6_9_5_9_8_2_6, 0.8_6_8_2_7_9, 0.7_5_5_8_0_9_2, 0.6_8_7_6_9_4_6_7, 0.8_5_8_0_5_8_0_4, 0.6_5_9_7_7_4_9_6, 0.4_4_8_8_5_3_0_2, 0.5_9_5_9_1_1_1, 0.4_2_5_1_5_9_5] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet( self ):
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
        hint = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/hint_image_cat.png""" )
        hint = torch.from_numpy(np.array(hint ) ).float() / 2_5_5.0
        hint = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        prompt = """A robot, 4k photo"""
        generator = torch.Generator(device="""cuda""" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        generator = torch.Generator(device="""cuda""" ).manual_seed(0 )
        output = pipeline(
            image_embeds=image_emb , negative_image_embeds=zero_image_emb , hint=hint , generator=generator , num_inference_steps=100 , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image , expected_image )
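    # Note (added commentary): this slow test exercises the full two-stage
    # Kandinsky 2.2 flow: the prior pipeline maps the prompt to image embeddings,
    # and the controlnet decoder renders them guided by the depth hint. A rough
    # sketch of the final comparison (the helper's assumed behaviour):
    #
    #   def mean_pixel_difference(a, b):
    #       return np.abs(a.astype(np.float32) - b.astype(np.float32)).mean()
    #   # assert_mean_pixel_difference passes when this value is small.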
| 1 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
    'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polynomial() -> None:
    '''simple docstring'''
    plt.scatter(X , y , color="""red""" )
    plt.plot(X , pol_reg.predict(poly_reg.fit_transform(X ) ) , color="""blue""" )
    plt.title("""Truth or Bluff (Linear Regression)""" )
    plt.xlabel("""Position level""" )
    plt.ylabel("""Salary""" )
    plt.show()
if __name__ == "__main__":
    viz_polynomial()
    # Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
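    # Quick illustration (added for clarity, not part of the original script):
    # with degree=4, PolynomialFeatures expands a single feature x into the
    # columns [1, x, x**2, x**3, x**4], so pol_reg is an ordinary least-squares
    # fit of a quartic polynomial.
    import numpy as np
    print(poly_reg.transform(np.array([[2.0]])))  # columns 1, 2, 4, 8, 16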
| 1 | 1 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate ( item , main_target ) -> tuple[str, float]:
    '''simple docstring'''
    _A = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(_A ))
def crossover ( parent_a , parent_b ) -> tuple[str, str]:
    '''simple docstring'''
    _A = random.randint(0 , len(parent_a ) - 1 )
    child_a = parent_a[:_A] + parent_b[_A:]
    child_b = parent_b[:_A] + parent_a[_A:]
    return (child_a, child_b)
def mutate ( child , genes ) -> str:
    '''simple docstring'''
    _A = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        _A[random.randint(0 , len(_A ) - 1 )] = random.choice(genes )
    return "".join(_A )
def select ( parent_a , population_score , genes , ) -> list[str]:
    '''simple docstring'''
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 100 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_b = population_score[random.randint(0 , N_SELECTED )][0]
        child_a , child_b = crossover(parent_a[0] , parent_b )
        # Append new string to the population list.
        pop.append(mutate(child_a , genes ) )
        pop.append(mutate(child_b , genes ) )
    return pop
def basic ( target , genes , debug = True ) -> tuple[int, int, str]:
    '''simple docstring'''
    if N_POPULATION < N_SELECTED:
        _A = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
        raise ValueError(_A )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        _A = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
        raise ValueError(_A )
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append("".join([random.choice(genes ) for i in range(len(target ) )] ) )
    # Just some logs to know what the algorithms is doing.
    generation , total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item , target ) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score , key=lambda x : x[1] , reverse=True )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , population_score , genes ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
        if len(population ) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
        '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
    )
    genes_list = list(
        ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
        '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
    )
    generation , population , target = basic(target_str, genes_list)
print(
F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
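    # Tiny deterministic sanity check (illustrative addition): `evaluate` counts
    # position-wise matches against the target, so a perfect candidate scores
    # len(target) and every mismatch costs one point.
    assert evaluate("banana", "banana") == ("banana", 6.0)
    assert evaluate("bancna", "banana")[1] == 5.0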
| 79 | '''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        """simple docstring"""
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = DPTImageProcessor if is_vision_available() else None
    def setUp( self):
        """simple docstring"""
        self.image_processor_tester = DPTImageProcessingTester(self)
    @property
    def image_processor_dict( self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing , """image_mean"""))
        self.assertTrue(hasattr(image_processing , """image_std"""))
        self.assertTrue(hasattr(image_processing , """do_normalize"""))
        self.assertTrue(hasattr(image_processing , """do_resize"""))
        self.assertTrue(hasattr(image_processing , """size"""))
    def test_image_processor_from_dict_with_kwargs( self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42})
    def test_call_pil( self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    def test_call_numpy( self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    def test_call_pytorch( self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
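    # Illustrative note (added commentary): with image_mean = image_std = 0.5 per
    # channel, the normalize step computes (pixel - 0.5) / 0.5, mapping inputs in
    # [0, 1] to [-1, 1]. A minimal numpy sketch:
    #
    #   import numpy as np
    #   img = np.full((18, 18, 3), 0.75, dtype=np.float32)
    #   normalized = (img - 0.5) / 0.5   # every value becomes 0.5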
| 272 | 0 |
"""simple docstring"""
from torch import nn
class ClassificationHead(nn.Module ):
    """simple docstring"""
    def __init__( self , class_size , embed_size ):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size , class_size )
    def forward( self , hidden_state ):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state )
        return logits
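if __name__ == "__main__":
    # Minimal usage sketch (illustrative addition): project a batch of hidden
    # states of width embed_size down to class_size logits.
    import torch
    head = ClassificationHead(class_size=5 , embed_size=768 )
    hidden = torch.randn(2 , 768 )
    print(head(hidden ).shape )  # torch.Size([2, 5])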
| 371 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = 'src/diffusers'
REPO_PATH = '.'
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'diffusers',
    os.path.join(DIFFUSERS_PATH, '__init__.py'),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue ( line : str , indent : str ):
    """simple docstring"""
    return line.startswith(indent ) or len(line ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" , line ) is not None
def find_code_in_diffusers ( object_name : str ):
    """simple docstring"""
    parts = object_name.split("." )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , F'{module}.py' ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module , parts[i] )
    if i >= len(parts ):
        raise ValueError(F'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
    with open(os.path.join(DIFFUSERS_PATH , F'{module}.py' ) , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(rF'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(F' {object_name} does not match any function or class in {module}.' )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] , indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines )
_re_copy_warning = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_re_replace_pattern = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
_re_fill_pattern = re.compile(R'<FILL\s+[^>]*>')
def get_indent ( code : str ):
    """simple docstring"""
    lines = code.split("\n" )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(r"^(\s*)\S" , lines[idx] ).groups()[0]
    return ""
def blackify ( code : str ):
    """simple docstring"""
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = F'class Bla:\n{code}'
    mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=True )
    result = black.format_str(code , mode=mode )
    result , _ = style_docstrings_in_code(result )
    return result[len("class Bla:\n" ) :] if has_indent else result
def is_copy_consistent ( filename : str , overwrite : bool = False ):
    """simple docstring"""
    with open(filename , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines ):
        search = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent , object_name , replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name )
        theoretical_indent = get_indent(theoretical_code )
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines ) and should_continue:
            line_index += 1
            if line_index >= len(lines ):
                break
            line = lines[line_index]
            should_continue = _should_continue(line , indent ) and re.search(F'^{indent}# End copy' , line ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines )
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(line ) is None]
        theoretical_code = "\n".join(theoretical_code )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern ) > 0:
            patterns = replace_pattern.replace("with" , "" ).split("," )
            patterns = [_re_replace_pattern.search(p ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obja , objb , option = pattern.groups()
                theoretical_code = re.sub(obja , objb , theoretical_code )
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obja.lower() , objb.lower() , theoretical_code )
                    theoretical_code = re.sub(obja.upper() , objb.upper() , theoretical_code )
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code )
            theoretical_code = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs ) > 0:
        # Warn the user a file has been modified.
        print(F'Detected changes, rewriting {filename}.' )
        with open(filename , "w" , encoding="utf-8" , newline="\n" ) as f:
            f.writelines(lines )
    return diffs
def check_copies ( overwrite : bool = False ):
    """simple docstring"""
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH , "**/*.py" ) , recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename , overwrite )
        diffs += [F'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        diff = "\n".join(diffs )
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
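    # Illustrative check (added commentary): the copy-detection regex matches the
    # marker comments this script scans for, capturing the indent, the object
    # name and an optional replacement pattern:
    #
    #   m = _re_copy_warning.search(
    #       "# Copied from diffusers.models.attention.Attention with Attention->MyAttention"
    #   )
    #   indent, object_name, replace_pattern = m.groups()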
| 221 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 332 |
"""simple docstring"""
_lowercase : Any = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_lowercase : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}]
_lowercase : int = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 332 | 1 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments :
    model_type: str = field(
        default=None , metadata={"""help""": """Model type selected in the list: """ + """, """.join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
    max_seq_length: int = field(
        default=1_2_8 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    doc_stride: int = field(
        default=1_2_8 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
    max_query_length: int = field(
        default=6_4 , metadata={
            """help""": (
                """The maximum number of tokens for the question. Questions longer than this will """
                """be truncated to this length."""
            )
        } , )
    max_answer_length: int = field(
        default=3_0 , metadata={
            """help""": (
                """The maximum length of an answer that can be generated. This is needed because the start """
                """and end predictions are not conditioned on one another."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    version_2_with_negative: bool = field(
        default=False , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    n_best_size: int = field(
        default=2_0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    lang_id: int = field(
        default=0 , metadata={
            """help""": (
                """language id of input for language-specific xlm models (see"""
                """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
            )
        } , )
    threads: int = field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} )
class Split ( Enum ):
    train = """train"""
    dev = """dev"""
class SquadDataset ( Dataset ):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__( self , args: SquadDataTrainingArguments , tokenizer: PreTrainedTokenizer , limit_length = None , mode = Split.train , is_language_sensitive = False , cache_dir = None , dataset_format = "pt" , ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name' )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = 'v2' if args.version_2_with_negative else 'v1'
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features['features']
                self.dataset = self.old_features.get('dataset' , None )
                self.examples = self.old_features.get('examples' , None )
                logger.info(
                    f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
                        ' future run' )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features , self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
                torch.save(
                    {'features': self.features, 'dataset': self.dataset, 'examples': self.examples} , cached_features_file , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ) -> List[Any]:
return len(self.features )
    def __getitem__( self , i ) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': attention_mask,
            'token_type_ids': token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
        if self.args.version_2_with_negative:
            inputs.update({'is_impossible': is_impossible} )
        if self.is_language_sensitive:
            inputs.update({'langs': (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
        return inputs
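    # Usage sketch (added commentary; argument values are assumed): because
    # __getitem__ returns ready-made tensor dicts, the dataset plugs straight
    # into a PyTorch DataLoader:
    #
    #   from torch.utils.data import DataLoader
    #   dataset = SquadDataset(args, tokenizer, mode=Split.train)
    #   loader = DataLoader(dataset, batch_size=8, shuffle=True)
    #   batch = next(iter(loader))
    #   print(batch["input_ids"].shape)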
| 371 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
a_ : Optional[Any] = TypeVar("T")
def get_parent_position (position : int ) -> int:
    '''simple docstring'''
    return (position - 1) // 2
def get_child_left_position (position : int ) -> int:
    '''simple docstring'''
    return (2 * position) + 1
def get_child_right_position (position : int ) -> int:
    '''simple docstring'''
    return (2 * position) + 2
class MinPriorityQueue ( Generic[T] ):
    def __init__( self ) -> None:
        self.heap = []
        self.position_map = {}
        self.elements = 0
    def __len__( self ) -> int:
        return self.elements
    def __repr__( self ) -> str:
        return str(self.heap )
    def is_empty( self ) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0
    def push( self , elem , weight ) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight) )
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem )
    def extract_min( self ) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1 )
        elem , _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem , _ = self.heap[0]
            self._bubble_down(bubble_down_elem )
        return elem
    def update_key( self , elem , weight ) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position )
            _ , parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem )
            else:
                self._bubble_down(elem )
        else:
            self._bubble_down(elem )
    def _bubble_up( self , elem ) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos )
        _ , weight = self.heap[curr_pos]
        _ , parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(curr_pos , parent_position )
            return self._bubble_up(elem )
        return None
    def _bubble_down( self , elem ) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _ , weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos )
        child_right_position = get_child_right_position(curr_pos )
        if child_left_position < self.elements and child_right_position < self.elements:
            _ , child_left_weight = self.heap[child_left_position]
            _ , child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        if child_left_position < self.elements:
            _ , child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position , curr_pos )
                return self._bubble_down(elem )
        else:
            return None
        if child_right_position < self.elements:
            _ , child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        return None
    def _swap_nodes( self , node1_pos , node2_pos ) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos] , self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted ( Generic[T] ):
    def __init__( self ) -> None:
        self.connections = {}
        self.nodes = 0
    def __repr__( self ) -> str:
        return str(self.connections )
    def __len__( self ) -> int:
        return self.nodes
    def add_node( self , node ) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1
    def add_edge( self , node1 , node2 , weight ) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1 )
        self.add_node(node2 )
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo (graph : GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
    '''simple docstring'''
    dist = {node: maxsize for node in graph.connections}
    parent = {node: None for node in graph.connections}
    priority_queue = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node , weight )
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour , dist[neighbour] )
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour , dist[neighbour] )
                parent[neighbour] = node
    return dist, parent
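if __name__ == "__main__":
    # Small usage example (illustrative addition): build a triangle graph and
    # read back the minimum-spanning-tree distances and parents.
    g = GraphUndirectedWeighted()
    g.add_edge("a" , "b" , 3 )
    g.add_edge("b" , "c" , 10 )
    g.add_edge("c" , "a" , 5 )
    dist , parent = prims_algo(g )
    print(dist )    # e.g. {'a': 0, 'b': 3, 'c': 5}
    print(parent )  # e.g. {'a': None, 'b': 'a', 'c': 'a'}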
| 104 | 0 |
"""simple docstring"""
def solution ( length : int = 50 ) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
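    # Worked check (added for clarity; example values from the Project Euler 116
    # statement): a row of length 5 admits 7 red (length-2), 3 green (length-3)
    # and 2 blue (length-4) tilings, so solution(5) returns 7 + 3 + 2 == 12.
    assert solution(5) == 12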
| 171 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
# Using `do_sample=False` to force deterministic output
UpperCAmelCase__ : List[str] = text_generator("""This is a test""" , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
UpperCAmelCase__ : List[Any] = text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
_lowerCamelCase , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
UpperCAmelCase__ : int = text_generator("""This is a test""" , do_sample=_lowerCamelCase , num_return_sequences=2 , return_tensors=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
{"""generated_token_ids""": ANY(_lowerCamelCase )},
{"""generated_token_ids""": ANY(_lowerCamelCase )},
] , )
UpperCAmelCase__ : Optional[int] = text_generator.model.config.eos_token_id
UpperCAmelCase__ : Any = """<pad>"""
UpperCAmelCase__ : Any = text_generator(
["""This is a test""", """This is a second test"""] , do_sample=_lowerCamelCase , num_return_sequences=2 , batch_size=2 , return_tensors=_lowerCamelCase , )
self.assertEqual(
_lowerCamelCase , [
[
{"""generated_token_ids""": ANY(_lowerCamelCase )},
{"""generated_token_ids""": ANY(_lowerCamelCase )},
],
[
{"""generated_token_ids""": ANY(_lowerCamelCase )},
{"""generated_token_ids""": ANY(_lowerCamelCase )},
],
] , )
@require_tf
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
# Using `do_sample=False` to force deterministic output
UpperCAmelCase__ : List[str] = text_generator("""This is a test""" , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
UpperCAmelCase__ : Dict = text_generator(["""This is a test""", """This is a second test"""] , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
    def _a (self , model , tokenizer , processor ):
        """simple docstring"""
        text_generator = TextGenerationPipeline(model=model , tokenizer=tokenizer )
        return text_generator, ["This is a test", "Another test"]
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = """Hello I believe in"""
UpperCAmelCase__ : Optional[int] = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase__ : Any = text_generator(_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
UpperCAmelCase__ : int = text_generator(_lowerCamelCase , stop_sequence=""" fe""" )
self.assertEqual(_lowerCamelCase , [{"""generated_text""": """Hello I believe in fe"""}] )
    def _a (self , text_generator , _ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = text_generator.model
UpperCAmelCase__ : Union[str, Any] = text_generator.tokenizer
UpperCAmelCase__ : Any = text_generator("""This is a test""" )
self.assertEqual(_lowerCamelCase , [{"""generated_text""": ANY(_lowerCamelCase )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
UpperCAmelCase__ : List[Any] = text_generator("""This is a test""" , return_full_text=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , [{"""generated_text""": ANY(_lowerCamelCase )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
UpperCAmelCase__ : int = pipeline(task="""text-generation""" , model=_lowerCamelCase , tokenizer=_lowerCamelCase , return_full_text=_lowerCamelCase )
UpperCAmelCase__ : Dict = text_generator("""This is a test""" )
self.assertEqual(_lowerCamelCase , [{"""generated_text""": ANY(_lowerCamelCase )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
UpperCAmelCase__ : Optional[Any] = text_generator("""This is a test""" , return_full_text=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , [{"""generated_text""": ANY(_lowerCamelCase )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
UpperCAmelCase__ : Union[str, Any] = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
[{"""generated_text""": ANY(_lowerCamelCase )}, {"""generated_text""": ANY(_lowerCamelCase )}],
[{"""generated_text""": ANY(_lowerCamelCase )}, {"""generated_text""": ANY(_lowerCamelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
UpperCAmelCase__ : Union[str, Any] = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_lowerCamelCase )
self.assertEqual(
_lowerCamelCase , [
[{"""generated_text""": ANY(_lowerCamelCase )}, {"""generated_text""": ANY(_lowerCamelCase )}],
[{"""generated_text""": ANY(_lowerCamelCase )}, {"""generated_text""": ANY(_lowerCamelCase )}],
] , )
with self.assertRaises(_lowerCamelCase ):
UpperCAmelCase__ : List[Any] = text_generator("""test""" , return_full_text=_lowerCamelCase , return_text=_lowerCamelCase )
with self.assertRaises(_lowerCamelCase ):
UpperCAmelCase__ : Optional[Any] = text_generator("""test""" , return_full_text=_lowerCamelCase , return_tensors=_lowerCamelCase )
with self.assertRaises(_lowerCamelCase ):
UpperCAmelCase__ : Any = text_generator("""test""" , return_text=_lowerCamelCase , return_tensors=_lowerCamelCase )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
UpperCAmelCase__ : Dict = text_generator("""""" )
self.assertEqual(_lowerCamelCase , [{"""generated_text""": ANY(_lowerCamelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
UpperCAmelCase__ : str = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
UpperCAmelCase__ : Tuple = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 500 , max_new_tokens=20 )
UpperCAmelCase__ : str = text_generator("""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_lowerCamelCase ):
text_generator(
"""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def _a (self ):
"""simple docstring"""
import torch
# Classic `model_kwargs`
UpperCAmelCase__ : str = pipeline(
model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
UpperCAmelCase__ : List[str] = pipe("""This is a test""" )
self.assertEqual(
_lowerCamelCase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
UpperCAmelCase__ : int = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
UpperCAmelCase__ : Any = pipe("""This is a test""" )
self.assertEqual(
_lowerCamelCase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
UpperCAmelCase__ : Optional[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
UpperCAmelCase__ : Optional[int] = pipe("""This is a test""" )
self.assertEqual(
_lowerCamelCase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
def _a (self ):
"""simple docstring"""
import torch
UpperCAmelCase__ : Any = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.floataa )
pipe("""This is a test""" )
@require_torch
@require_accelerate
@require_torch_gpu
def _a (self ):
"""simple docstring"""
import torch
UpperCAmelCase__ : Any = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.floataa )
pipe("""This is a test""" , do_sample=_lowerCamelCase , top_p=0.5 )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = """Hello world"""
UpperCAmelCase__ : str = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
if text_generator.model.framework == "tf":
UpperCAmelCase__ : Any = logging.get_logger("""transformers.generation.tf_utils""" )
else:
UpperCAmelCase__ : Union[str, Any] = logging.get_logger("""transformers.generation.utils""" )
UpperCAmelCase__ : Optional[int] = """Both `max_new_tokens`""" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_lowerCamelCase ) as cl:
UpperCAmelCase__ : List[str] = text_generator(_lowerCamelCase , max_length=10 , max_new_tokens=1 )
self.assertIn(_lowerCamelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_lowerCamelCase ) as cl:
UpperCAmelCase__ : Any = text_generator(_lowerCamelCase , max_new_tokens=1 )
self.assertNotIn(_lowerCamelCase , cl.out )
with CaptureLogger(_lowerCamelCase ) as cl:
UpperCAmelCase__ : Optional[Any] = text_generator(_lowerCamelCase , max_length=10 )
self.assertNotIn(_lowerCamelCase , cl.out )
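    # Usage sketch (added commentary): outside the test harness, the warning
    # check above boils down to preferring `max_new_tokens` over `max_length`:
    #
    #   generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
    #   out = generator("Hello world", max_new_tokens=1)                 # no warning
    #   out = generator("Hello world", max_length=10, max_new_tokens=1)  # warns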
| 171 | 1 |
"""simple docstring"""
def multiplication_table ( number , number_of_terms ) -> str:
return "\n".join(
F"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=1_0))
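    # Worked example (added for clarity): the function joins one line per term,
    # so multiplication_table(5, 3) is the three-line string shown below.
    assert multiplication_table(5, 3) == "5 * 1 = 5\n5 * 2 = 10\n5 * 3 = 15"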
| 360 | """simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__A = "hf-internal-testing/tiny-random-bert"
__A = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
__A = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
lowercase__: Union[str, Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCAmelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ) )
with open(os.path.join(_UpperCAmelCase , '''refs''' , '''main''' ) ) as f:
lowercase__: Dict = f.read()
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''snapshots''' , _UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(os.path.isfile(_UpperCAmelCase ) )
# File is cached at the same place the second time.
lowercase__: Any = cached_file(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
# Using a specific revision to test the full commit hash.
lowercase__: Dict = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='''9b8c223''' )
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''snapshots''' , _UpperCAmelCase , _UpperCAmelCase ) )
def _snake_case ( self ):
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid model identifier''' ):
lowercase__: int = cached_file('''tiny-random-bert''' , _UpperCAmelCase )
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid git identifier''' ):
lowercase__: List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='''aaaa''' )
with self.assertRaisesRegex(_UpperCAmelCase , '''does not appear to have a file named''' ):
lowercase__: Dict = cached_file(_UpperCAmelCase , '''conf''' )
def _snake_case ( self ):
with self.assertRaisesRegex(_UpperCAmelCase , '''does not appear to have a file named''' ):
lowercase__: Optional[Any] = cached_file(_UpperCAmelCase , '''conf''' )
with open(os.path.join(_UpperCAmelCase , '''refs''' , '''main''' ) ) as f:
lowercase__: int = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '''.no_exist''' , _UpperCAmelCase , '''conf''' ) ) )
lowercase__: Dict = cached_file(_UpperCAmelCase , '''conf''' , _raise_exceptions_for_missing_entries=_UpperCAmelCase )
self.assertIsNone(_UpperCAmelCase )
lowercase__: List[str] = cached_file(_UpperCAmelCase , '''conf''' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase )
self.assertIsNone(_UpperCAmelCase )
lowercase__: Union[str, Any] = mock.Mock()
lowercase__: str = 500
lowercase__: Union[str, Any] = {}
lowercase__: List[str] = HTTPError
lowercase__: int = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=_UpperCAmelCase ) as mock_head:
lowercase__: Any = cached_file(_UpperCAmelCase , '''conf''' , _raise_exceptions_for_connection_errors=_UpperCAmelCase )
self.assertIsNone(_UpperCAmelCase )
# This check we did call the fake head request
mock_head.assert_called()
def _snake_case ( self ):
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) )
def _snake_case ( self ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , _UpperCAmelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , _UpperCAmelCase , revision='''ahaha''' )
lowercase__: Optional[Any] = get_file_from_repo('''bert-base-cased''' , _UpperCAmelCase )
# The name is the cached name which is not very easy to test, so instead we load the content.
lowercase__: Optional[Any] = json.loads(open(_UpperCAmelCase , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 768 )
    def test_get_file_from_repo_local(self):
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__: Any = Path(_UpperCAmelCase ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCAmelCase , '''a.txt''' ) , str(_UpperCAmelCase ) )
self.assertIsNone(get_file_from_repo(_UpperCAmelCase , '''b.txt''' ) )
| 2 | 0 |
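The fragment above tests transformers' hub caching helpers (cached_file, has_file, get_file_from_repo). A minimal happy-path sketch, assuming network access; the tiny checkpoint name is an assumption chosen to mirror these tests:

from transformers.utils import cached_file

# First call downloads config.json into the local HF cache; a second call just
# resolves the same snapshot path, which is what the tests above assert.
resolved = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
print(resolved)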
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    """In-graph GPT-2 tokenizer built on keras-nlp's BytePairTokenizer."""

    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 277 |
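A hedged usage sketch for the in-graph tokenizer above; the "gpt2" checkpoint name is an assumption, and any GPT-2-style vocab/merges pair works:

import tensorflow as tf

tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
batch = tf_tokenizer(tf.constant(["hello world"]))
# Tokenization happens inside the TF graph, so this layer can be exported
# as part of a SavedModel with no Python-side preprocessing.
print(batch["input_ids"], batch["attention_mask"])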
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    """Configuration for the original OpenAI GPT model."""

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 277 | 1 |
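A quick sketch of the config above: thanks to attribute_map, the generic PretrainedConfig names read through to the GPT-specific fields.

config = OpenAIGPTConfig(n_layer=6)
print(config.n_embd)       # 768, the default embedding width
print(config.hidden_size)  # also 768; attribute_map routes hidden_size -> n_embd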
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[0]
    # If you're using some other dataset, change the target column here.
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    # input_shape on a non-first layer is ignored by Keras; only the first matters
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
| 5 |
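One gap in the script above: the fitted MinMaxScaler is discarded, so predictions stay in the scaled [0, 1] range. A minimal sketch of keeping it so outputs can be mapped back to prices (toy data; names are assumptions):

import numpy as np
from sklearn.preprocessing import MinMaxScaler

prices = np.array([[10.0], [12.0], [11.0], [15.0]])  # toy closing prices
scaler = MinMaxScaler()
scaled = scaler.fit_transform(prices)        # keep the fitted scaler around
# ... train the LSTM and predict in scaled space ...
restored = scaler.inverse_transform(scaled)  # back to the original price scale
assert np.allclose(restored, prices)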
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
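# Quick sanity check for the helper above (run manually; 10 steps is arbitrary):
#   betas = betas_for_alpha_bar(10)
#   assert betas.shape == (10,) and (betas > 0).all() and (betas <= 0.999).all()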
class UpperCAmelCase__ ( lowercase__ , lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : Dict = 2
@register_to_config
def __init__( self : str ,_a : int = 1000 ,_a : float = 0.0_0085 ,_a : float = 0.012 ,_a : str = "linear" ,_a : Optional[Union[np.ndarray, List[float]]] = None ,_a : str = "epsilon" ,_a : Optional[bool] = False ,_a : Optional[bool] = False ,_a : float = 1.0 ,_a : str = "linspace" ,_a : int = 0 ,):
'''simple docstring'''
if trained_betas is not None:
_a : List[str] = torch.tensor(_a ,dtype=torch.floataa )
elif beta_schedule == "linear":
_a : Tuple = torch.linspace(_a ,_a ,_a ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a : List[str] = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,_a ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a : Dict = betas_for_alpha_bar(_a ,alpha_transform_type='cosine' )
elif beta_schedule == "exp":
_a : Tuple = betas_for_alpha_bar(_a ,alpha_transform_type='exp' )
else:
            raise NotImplementedError(f"""{beta_schedule} is not implemented for {self.__class__}""" )
_a : Optional[Any] = 1.0 - self.betas
_a : Optional[int] = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(_a ,_a ,_a )
_a : Optional[int] = use_karras_sigmas
def __lowercase ( self : Any ,_a : Union[str, Any] ,_a : Optional[Any]=None ):
'''simple docstring'''
if schedule_timesteps is None:
_a : List[Any] = self.timesteps
_a : Dict = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_a : int = 1 if len(_a ) > 1 else 0
else:
_a : str = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
_a : str = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowercase ( self : int ,_a : torch.FloatTensor ,_a : Union[float, torch.FloatTensor] ,):
'''simple docstring'''
_a : List[Any] = self.index_for_timestep(_a )
_a : Tuple = self.sigmas[step_index]
_a : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowercase ( self : Any ,_a : int ,_a : Union[str, torch.device] = None ,_a : Optional[int] = None ,):
'''simple docstring'''
_a : Optional[Any] = num_inference_steps
_a : Dict = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_a : Optional[Any] = np.linspace(0 ,num_train_timesteps - 1 ,_a ,dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_a : str = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a : int = (np.arange(0 ,_a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_a : Any = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a : Union[str, Any] = (np.arange(_a ,0 ,-step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_a : Tuple = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_a : Union[str, Any] = np.log(_a )
_a : str = np.interp(_a ,np.arange(0 ,len(_a ) ) ,_a )
if self.config.use_karras_sigmas:
_a : List[Any] = self._convert_to_karras(in_sigmas=_a ,num_inference_steps=self.num_inference_steps )
_a : Dict = np.array([self._sigma_to_t(_a ,_a ) for sigma in sigmas] )
_a : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_a : Union[str, Any] = torch.from_numpy(_a ).to(device=_a )
_a : Any = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
_a : List[Any] = torch.from_numpy(_a )
_a : List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(_a ).startswith('mps' ):
# mps does not support float64
_a : Tuple = timesteps.to(_a ,dtype=torch.floataa )
else:
_a : Dict = timesteps.to(device=_a )
# empty dt and derivative
_a : Tuple = None
_a : Optional[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_a : Union[str, Any] = defaultdict(_a )
def __lowercase ( self : str ,_a : Dict ,_a : Dict ):
'''simple docstring'''
_a : Optional[int] = np.log(_a )
# get distribution
_a : Union[str, Any] = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
_a : List[Any] = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
_a : Tuple = low_idx + 1
_a : Union[str, Any] = log_sigmas[low_idx]
_a : Optional[Any] = log_sigmas[high_idx]
# interpolate sigmas
_a : Optional[Any] = (low - log_sigma) / (low - high)
_a : List[str] = np.clip(_a ,0 ,1 )
# transform interpolation to time range
_a : Union[str, Any] = (1 - w) * low_idx + w * high_idx
_a : List[str] = t.reshape(sigma.shape )
return t
def __lowercase ( self : int ,_a : torch.FloatTensor ,_a : Tuple ):
'''simple docstring'''
_a : float = in_sigmas[-1].item()
_a : float = in_sigmas[0].item()
_a : Tuple = 7.0 # 7.0 is the value used in the paper
_a : str = np.linspace(0 ,1 ,_a )
_a : Optional[Any] = sigma_min ** (1 / rho)
_a : Union[str, Any] = sigma_max ** (1 / rho)
_a : str = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return self.dt is None
def __lowercase ( self : int ,_a : Union[torch.FloatTensor, np.ndarray] ,_a : Union[float, torch.FloatTensor] ,_a : Union[torch.FloatTensor, np.ndarray] ,_a : bool = True ,):
'''simple docstring'''
_a : Union[str, Any] = self.index_for_timestep(_a )
# advance index counter by 1
_a : Any = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_a : Tuple = self.sigmas[step_index]
_a : int = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
_a : List[str] = self.sigmas[step_index - 1]
_a : List[Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_a : Optional[int] = 0
_a : Tuple = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_a : Dict = sigma_hat if self.state_in_first_order else sigma_next
_a : Optional[int] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_a : List[Any] = sigma_hat if self.state_in_first_order else sigma_next
_a : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
_a : Union[str, Any] = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
_a : Optional[int] = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_a : Optional[Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_a : Any = sigma_next - sigma_hat
# store for 2nd order step
_a : int = derivative
_a : List[str] = dt
_a : Union[str, Any] = sample
else:
# 2. 2nd order / Heun's method
_a : Dict = (sample - pred_original_sample) / sigma_next
_a : Tuple = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
_a : Optional[Any] = self.dt
_a : Union[str, Any] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
_a : List[Any] = None
_a : Union[str, Any] = None
_a : Dict = None
_a : str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def __lowercase ( self : Optional[int] ,_a : torch.FloatTensor ,_a : torch.FloatTensor ,_a : torch.FloatTensor ,):
'''simple docstring'''
_a : str = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
_a : Dict = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
_a : Optional[Any] = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
_a : int = self.timesteps.to(original_samples.device )
_a : Optional[Any] = timesteps.to(original_samples.device )
_a : Any = [self.index_for_timestep(_a ,_a ) for t in timesteps]
_a : Optional[int] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_a : Optional[Any] = sigma.unsqueeze(-1 )
_a : Any = original_samples + noise * sigma
return noisy_samples
def __len__( self : Optional[int] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 5 | 1 |
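The `_convert_to_karras` step invoked from `set_timesteps` above implements the rho-schedule of Karras et al. (2022). A standalone NumPy sketch of the same computation; the endpoint values and step count are arbitrary:

import numpy as np

def karras_sigmas(sigma_min, sigma_max, n, rho=7.0):
    # Interpolate linearly in sigma**(1/rho) space, then raise back to sigma;
    # rho=7 concentrates steps near sigma_min, as in the scheduler above.
    ramp = np.linspace(0, 1, n)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

print(karras_sigmas(0.1, 10.0, 5))  # monotonically decreasing noise levels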
def generate_large_matrix():
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (  # small grids used by the original doctests, plus the large one
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid):
    # Every row and every column must be sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array):
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid):
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid):
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid):
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark():
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 287 |
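A small standalone check tying the counters above together; the demo grid reuses one of the sample's own test constants:

demo = [[7, 7, 6], [-1, -2, -3]]  # rows and columns sorted in decreasing order
validate_grid(demo)
assert count_negatives_binary_search(demo) == 3
assert count_negatives_brute_force(demo) == 3
assert count_negatives_brute_force_with_break(demo) == 3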
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False
def _snake_case ( self : str ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
A: Optional[int] = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
A: Union[str, Any] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
A: Dict = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
A: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : Any=20 , SCREAMING_SNAKE_CASE_ : Optional[int]=5 ) -> Tuple[str, list]:
'''simple docstring'''
A: int = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )) for i in range(len(SCREAMING_SNAKE_CASE_ ) )]
A: Optional[Any] = list(filter(lambda SCREAMING_SNAKE_CASE_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) )
if max_length is not None and len(SCREAMING_SNAKE_CASE_ ) > max_length:
A: int = toks[:max_length]
if min_length is not None and len(SCREAMING_SNAKE_CASE_ ) < min_length and len(SCREAMING_SNAKE_CASE_ ) > 0:
while len(SCREAMING_SNAKE_CASE_ ) < min_length:
A: Dict = toks + toks
# toks_str = [t[1] for t in toks]
A: Union[str, Any] = [t[0] for t in toks]
# Ensure consistency
A: List[str] = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
if " " not in output_txt and len(SCREAMING_SNAKE_CASE_ ) > 1:
A: int = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
)
if with_prefix_space:
A: Tuple = ''' ''' + output_txt
A: List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
return output_txt, output_ids
    def get_tokenizer(self, **kwargs):
        """Instantiate the phoneme tokenizer from the vocab written in setUp."""
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def _snake_case ( self : int ) -> Optional[Any]:
'''simple docstring'''
A: List[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
A: Any = tokenizer('''m xxx ɪ''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_92, 17] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
A: Optional[int] = tokenizer('''m aaa ɪ ccc''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa
A: str = tokenizer('''maɪ c''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , [3, 2_00] ) # mai should be <unk> (=3)
def _snake_case ( self : int ) -> List[Any]:
'''simple docstring'''
A: Any = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: Any = '''Hello how are you'''
A: Optional[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
def _snake_case ( self : Tuple ) -> Dict:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: List[Any] = '''Hello how are you'''
A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids )
def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: List[str] = '''Hello how are you'''
A: Union[str, Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: Union[str, Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: Optional[Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
A: List[str] = tokenizer.decode(sample_ids[0] )
A: List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
def _snake_case ( self : Any ) -> Optional[int]:
'''simple docstring'''
A: int = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: List[Any] = '''Hello how are you'''
A: Optional[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def _snake_case ( self : List[str] ) -> int:
'''simple docstring'''
A: Optional[Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Optional[Any] = '''Hello how are you'''
A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids )
def _snake_case ( self : Dict ) -> Any:
'''simple docstring'''
A: Optional[int] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
A: str = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
A: Tuple = tokenizer.decode(sample_ids[0] )
A: Optional[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
A: str = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def _snake_case ( self : int ) -> List[str]:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Union[str, Any] = '''Hello how are you'''
A: Tuple = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: Any = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] ) -> Any:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Any = '''Hello how are you'''
A: List[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: List[Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
A: List[str] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=SCREAMING_SNAKE_CASE_ )
A: List[Any] = '''Hello how are you'''
A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ).input_ids
A: Tuple = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: Tuple = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
A: Any = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''ɛ l o h aʊ a ʁ j u''' )
def _snake_case ( self : str ) -> str:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: str = '''Hello how Are you'''
A: Union[str, Any] = '''hello how are you'''
A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
A: str = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : int ) -> List[Any]:
'''simple docstring'''
A: Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
A: Tuple = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
@staticmethod
def _snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple:
'''simple docstring'''
A: Any = [d[key] for d in offsets]
return retrieved_list
def _snake_case ( self : Any ) -> Tuple:
'''simple docstring'''
A: str = self.get_tokenizer(word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
A: Union[str, Any] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
A: int = tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def _snake_case ( self : Any ) -> List[Any]:
'''simple docstring'''
A: Optional[int] = self.get_tokenizer(word_delimiter_token='''|''' )
def check_list_tuples_equal(SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(isinstance(outputs_list[0] , SCREAMING_SNAKE_CASE_ ) )
# transform list to ModelOutput
            outputs_batch_a = Wav2Vec2PhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] )
def recursive_check(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
[recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for la, la in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] )
# fmt: off
A: int = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ )
A: List[Any] = [tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ ) for ids in sample_ids]
check_list_tuples_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def _snake_case ( self : int ) -> int:
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def _snake_case ( self : str ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def _snake_case ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def _snake_case ( self : Dict ) -> List[Any]:
'''simple docstring'''
pass
def _snake_case ( self : Tuple ) -> Any:
'''simple docstring'''
A: Any = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
A: str = tokenizer.vocab_size
A: str = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
A: List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
A: List[Any] = tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ )
A: Optional[Any] = tokenizer.vocab_size
A: Union[str, Any] = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size + len(SCREAMING_SNAKE_CASE_ ) )
A: Any = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
A: str = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
A: int = tokenizer.add_special_tokens(SCREAMING_SNAKE_CASE_ )
A: Optional[Any] = tokenizer.vocab_size
A: Optional[Any] = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size_a + len(SCREAMING_SNAKE_CASE_ ) )
A: int = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def _snake_case ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def _snake_case ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def _snake_case ( self : str ) -> Tuple:
'''simple docstring'''
A: List[Any] = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
A: Union[str, Any] = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
A: Union[str, Any] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(output['''text'''] , SCREAMING_SNAKE_CASE_ )
| 319 | 0 |
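A minimal sketch of the round trip the tests above exercise; the checkpoint name comes from the tests themselves, and phonemization needs the `phonemizer` package with an espeak backend installed:

from transformers import Wav2Vec2PhonemeCTCTokenizer

tok = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
ids = tok("Hello how are you").input_ids  # text -> phoneme ids via espeak
print(tok.decode(ids))                    # "h ə l oʊ h aʊ ɑːɹ j uː" per the tests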
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class a__ ( UpperCAmelCase__ ):
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(a , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(a , '''num_attention_heads''' ) )
self.parent.assertTrue(hasattr(a , '''num_encoder_blocks''' ) )
class a__ :
def __init__( self : Any , a : List[str] , a : Dict=13 , a : str=64 , a : List[Any]=3 , a : int=4 , a : Union[str, Any]=[2, 2, 2, 2] , a : Optional[Any]=[8, 4, 2, 1] , a : Tuple=[16, 32, 64, 1_28] , a : Optional[Any]=[1, 4, 8, 16] , a : Dict=[1, 2, 4, 8] , a : List[str]=True , a : Tuple=True , a : Optional[int]="gelu" , a : str=0.1 , a : Dict=0.1 , a : Optional[Any]=0.02 , a : List[str]=3 , a : int=None , ):
"""simple docstring"""
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = image_size
__lowerCamelCase = num_channels
__lowerCamelCase = num_encoder_blocks
__lowerCamelCase = sr_ratios
__lowerCamelCase = depths
__lowerCamelCase = hidden_sizes
__lowerCamelCase = downsampling_rates
__lowerCamelCase = num_attention_heads
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = scope
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowerCamelCase = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : Optional[Any] , a : Any , a : List[str] ):
"""simple docstring"""
__lowerCamelCase = SegformerModel(config=a )
model.to(a )
model.eval()
__lowerCamelCase = model(a )
__lowerCamelCase = __lowerCamelCase = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : List[str] , a : Optional[Any] , a : Dict ):
"""simple docstring"""
__lowerCamelCase = self.num_labels
__lowerCamelCase = SegformerForSemanticSegmentation(a )
model.to(a )
model.eval()
__lowerCamelCase = model(a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
__lowerCamelCase = model(a , labels=a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : Dict , a : int , a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = 1
__lowerCamelCase = SegformerForSemanticSegmentation(config=a )
model.to(a )
model.eval()
__lowerCamelCase = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(a )
__lowerCamelCase = model(a , labels=a )
self.parent.assertGreater(result.loss , 0.0 )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCamelCase : Optional[int] =(
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase : Optional[Any] =(
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase : List[Any] =True
lowerCamelCase : Union[str, Any] =False
lowerCamelCase : Optional[Any] =False
lowerCamelCase : Tuple =False
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = SegformerModelTester(self )
__lowerCamelCase = SegformerConfigTester(self , config_class=a )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*a )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*a )
@unittest.skip('''SegFormer does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(a )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = True
for model_class in self.all_model_classes:
__lowerCamelCase = True
__lowerCamelCase = False
__lowerCamelCase = True
__lowerCamelCase = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(a , a ) )
__lowerCamelCase = outputs.attentions
__lowerCamelCase = sum(self.model_tester.depths )
self.assertEqual(len(a ) , a )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCamelCase = True
__lowerCamelCase = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(a , a ) )
__lowerCamelCase = outputs.attentions
self.assertEqual(len(a ) , a )
# verify the first attentions (first block, first layer)
__lowerCamelCase = (self.model_tester.image_size // 4) ** 2
__lowerCamelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
__lowerCamelCase = (self.model_tester.image_size // 32) ** 2
__lowerCamelCase = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
__lowerCamelCase = len(a )
# Check attention is always last and order is fine
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(a , a ) )
self.assertEqual(out_len + 1 , len(a ) )
__lowerCamelCase = outputs.attentions
self.assertEqual(len(a ) , a )
# verify the first attentions (first block, first layer)
__lowerCamelCase = (self.model_tester.image_size // 4) ** 2
__lowerCamelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
def check_hidden_states_output(a : List[Any] , a : List[str] , a : Tuple ):
__lowerCamelCase = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(a , a ) )
__lowerCamelCase = outputs.hidden_states
__lowerCamelCase = self.model_tester.num_encoder_blocks
self.assertEqual(len(a ) , a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase = True
check_hidden_states_output(a , a , a )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
if not self.model_tester.is_training:
return
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(a ):
continue
__lowerCamelCase = model_class(a )
model.to(a )
model.train()
__lowerCamelCase = self._prepare_for_class(a , a , return_labels=a )
__lowerCamelCase = model(**a ).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = SegformerModel.from_pretrained(a )
self.assertIsNotNone(a )
def __lowerCAmelCase ( ) -> Union[str, Any]:
__lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class a__ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
__lowerCamelCase = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=a , align=a , do_random_crop=a )
__lowerCamelCase = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
a )
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=a , return_tensors='''pt''' )
__lowerCamelCase = encoded_inputs.pixel_values.to(a )
with torch.no_grad():
__lowerCamelCase = model(a )
__lowerCamelCase = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , a )
__lowerCamelCase = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , a , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=a , align=a , do_random_crop=a )
__lowerCamelCase = SegformerForSemanticSegmentation.from_pretrained(
'''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(a )
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=a , return_tensors='''pt''' )
__lowerCamelCase = encoded_inputs.pixel_values.to(a )
with torch.no_grad():
__lowerCamelCase = model(a )
__lowerCamelCase = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , a )
__lowerCamelCase = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , a , atol=1e-1 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=a , align=a , do_random_crop=a )
__lowerCamelCase = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
a )
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=a , return_tensors='''pt''' )
__lowerCamelCase = encoded_inputs.pixel_values.to(a )
with torch.no_grad():
__lowerCamelCase = model(a )
__lowerCamelCase = outputs.logits.detach().cpu()
__lowerCamelCase = image_processor.post_process_semantic_segmentation(outputs=a , target_sizes=[(5_00, 3_00)] )
__lowerCamelCase = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , a )
__lowerCamelCase = image_processor.post_process_semantic_segmentation(outputs=a )
__lowerCamelCase = torch.Size((1_28, 1_28) )
self.assertEqual(segmentation[0].shape , a )
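        # Recap of the post-processing contract exercised above (illustrative):
        # with target_sizes, post_process_semantic_segmentation upsamples each
        # logit map to the requested (H, W) before taking the argmax; without
        # it, the label maps stay at the model's logit resolution (128, 128).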
| 357 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"
    def __init__(
        self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1,
        attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1,
        layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group",
        feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False,
        num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False,
        apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
        num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1,
        num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1,
        ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False,
        classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1,
        eos_token_id=2, num_clusters=504, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        # total stride of the convolutional feature extractor
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 237 | 0 |
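The inputs_to_logits_ratio property above is just a product over the conv strides; a minimal standalone check of that arithmetic (the default UniSpeechSat strides are assumed, and downsampling_factor is an illustrative name, not a library function):
import functools
import operator
def downsampling_factor(conv_stride=(5, 2, 2, 2, 2, 2, 2)) -> int:
    # product of the conv strides = raw samples consumed per output frame
    return functools.reduce(operator.mul, conv_stride, 1)
# 5 * 2**6 == 320, i.e. one encoder frame per 20 ms of 16 kHz audio
assert downsampling_factor() == 320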
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True,
        is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True,
        decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, decoder_attention_heads=4,
        max_position_embeddings=30, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, )
        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels, ):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        past_key_values = outputs["past_key_values"]
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False
    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)
    # not implemented currently
    def test_inputs_embeds(self):
        pass
    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass
    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return
    @unittest.skip("The model doesn't support left padding" )  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 92 |
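The decoder test above verifies that a step using past_key_values reproduces the corresponding slice of a full forward pass; a toy sketch of the same cached-vs-uncached equivalence check, with a cumulative sum standing in for a causal decoder (all names here are illustrative):
import torch
def full_forward(x):
    # toy "causal decoder": output at position t is the sum of inputs 0..t
    return torch.cumsum(x, dim=-1)
def cached_step(x_new, running_total):
    # consume only the new tokens, reusing the cached running total
    out = running_total + torch.cumsum(x_new, dim=-1)
    new_total = running_total + x_new.sum(dim=-1, keepdim=True)
    return out, new_total
x = torch.randn(2, 5)
full = full_forward(x)
cache = x[:, :4].sum(dim=-1, keepdim=True)  # "past_key_values" analogue
step_out, _ = cached_step(x[:, 4:], cache)
assert torch.allclose(full[:, 4:], step_out, atol=1e-6)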
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path: str, openai_config_file: str, pytorch_dump_folder_path: str) -> None:
    """Convert an OpenAI GPT TensorFlow checkpoint into a PyTorch model directory."""
    # Construct the model config, either from a JSON file or from the defaults
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--openai_checkpoint_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the TensorFlow checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--openai_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 102 | 0 |
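The conversion script above boils down to a state_dict/config save step; a minimal self-contained sketch of that round trip using a toy torch module (file names mirror the WEIGHTS_NAME/CONFIG_NAME convention, everything else is illustrative):
import json
import tempfile
import torch
model = torch.nn.Linear(4, 2)  # stand-in for the converted OpenAIGPTModel
with tempfile.TemporaryDirectory() as dump_dir:
    torch.save(model.state_dict(), f"{dump_dir}/pytorch_model.bin")
    with open(f"{dump_dir}/config.json", "w", encoding="utf-8") as f:
        json.dump({"in_features": 4, "out_features": 2}, f)
    # round-trip: a fresh module can restore the saved weights
    restored = torch.nn.Linear(4, 2)
    restored.load_state_dict(torch.load(f"{dump_dir}/pytorch_model.bin"))
    assert torch.equal(model.weight, restored.weight)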
def solution(pence: int = 200) -> int:
    """Count the combinations of British coins that sum to `pence` (Project Euler 31)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
    assert solution(200) == 73682
| 358 |
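Iterating coins in the outer loop is what makes the table count combinations rather than ordered sequences; a small check of that property with a tiny coin set (illustrative helper name):
def count_combinations(coins, target):
    ways = [1] + [0] * target
    for coin in coins:
        for i in range(coin, target + 1):
            ways[i] += ways[i - coin]
    return ways[target]
# With coins [1, 2] and target 4 there are exactly three combinations:
# 1+1+1+1, 1+1+2, 2+2. Swapping the loops would count ordered sequences instead.
assert count_combinations([1, 2], 4) == 3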
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def check_results_dict_not_empty(self, results) -> None:
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : Dict = "sshleifer/tiny-gpt2"
snake_case_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Optional[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> int:
snake_case_ : List[Any] = "sgugger/tiny-distilbert-classification"
snake_case_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , only_pretrain_model=_SCREAMING_SNAKE_CASE , )
snake_case_ : int = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : List[str] = "sshleifer/tiny-gpt2"
snake_case_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Optional[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> int:
snake_case_ : Union[str, Any] = "sshleifer/tiny-gpt2"
snake_case_ : List[str] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
snake_case_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ : str = "sshleifer/tiny-gpt2"
snake_case_ : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
snake_case_ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> str:
snake_case_ : List[str] = "sshleifer/tiny-gpt2"
snake_case_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCAmelCase ( self ) -> Dict:
snake_case_ : str = "sshleifer/tiny-gpt2"
snake_case_ : str = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Optional[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
snake_case_ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ : List[str] = "patrickvonplaten/t5-tiny-random"
snake_case_ : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
snake_case_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." )
def _lowerCAmelCase ( self ) -> Dict:
snake_case_ : int = "sshleifer/tiny-gpt2"
snake_case_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : Union[str, Any] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_SCREAMING_SNAKE_CASE , save_to_csv=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , "inf_time.csv" ) , inference_memory_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , "inf_mem.csv" ) , env_info_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , "env.csv" ) , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Dict = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
benchmark.run()
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , "env.csv" ) ).exists() )
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ : int = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(_SCREAMING_SNAKE_CASE ):
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "sequential" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "cumulative" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "current" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_SCREAMING_SNAKE_CASE , "log.txt" ) , log_print=_SCREAMING_SNAKE_CASE , trace_memory_line_by_line=_SCREAMING_SNAKE_CASE , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Tuple = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : int = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , "log.txt" ) ).exists() )
| 36 | 0 |
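A minimal sketch of the benchmark pattern these tests exercise; the flag values (training=False, inference=True) are assumptions, since the tests above pass obfuscated placeholders, and running it requires TensorFlow plus network access to the tiny model:
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
benchmark_args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(benchmark_args).run()
print(results.time_inference_result)
print(results.memory_inference_result)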
'''simple docstring'''
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """Liouville lambda: -1 if `number` has an odd count of prime factors (with multiplicity), else 1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 |
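Liouville's lambda only needs prime factors counted with multiplicity; a self-contained check against the first ten values of the sequence (local trial-division helper, illustrative names):
def prime_factors_local(n: int) -> list[int]:
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors
def liouville(n: int) -> int:
    return -1 if len(prime_factors_local(n)) % 2 else 1
# OEIS A008836 starts 1, -1, -1, 1, -1, 1, -1, -1, 1, 1
assert [liouville(n) for n in range(1, 11)] == [1, -1, -1, 1, -1, 1, -1, -1, 1, 1]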
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 251 | 0 |
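A minimal sketch of the lazy-import pattern above: attribute access triggers the submodule import instead of paying the cost at package import time. This is a simplified illustration, not the transformers _LazyModule implementation:
import importlib
import types
class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }
    def __getattr__(self, name):
        submodule = self._symbol_to_module.get(name)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        # the submodule is imported only on first attribute access
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, name)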
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Pure-Python SHA-1, for teaching purposes only (not for production use)."""
    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]
    @staticmethod
    def rotate(n, b):
        # left-rotate the 32-bit integer n by b bits
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF
    def padding(self):
        # pad to a multiple of 64 bytes, then append the bit length big-endian
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data
    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]
    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w
    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    data = b"Test String"
    assert SHA1Hash(data).final_hash() == hashlib.sha1(data).hexdigest()  # noqa: S324
def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
    main()
    import doctest
    doctest.testmod()
| 122 |
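A quick cross-check of the class above against the standard library on a few messages (assumes the SHA1Hash definition above is in scope):
import hashlib
for message in (b"", b"abc", b"The quick brown fox jumps over the lazy dog"):
    assert SHA1Hash(message).final_hash() == hashlib.sha1(message).hexdigest()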
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL
class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)
    def __bool__(self) -> bool:
        return False
_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing (linear probing) and automatic resizing."""
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)
    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)
    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False
    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)
    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit
    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)
    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)
    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)
    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)
    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break
    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)
    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)
    def __len__(self) -> int:
        return self._len
    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)
    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| 122 | 1 |
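A short exercise of the map above: it behaves like a dict, with linear probing and automatic resizing underneath (assumes the HashMap definition above is in scope):
hm = HashMap(initial_block_size=4)
for k in range(10):
    hm[k] = k * k  # triggers several size-ups along the way
assert len(hm) == 10 and hm[7] == 49
del hm[7]
assert sorted(hm) == [0, 1, 2, 3, 4, 5, 6, 8, 9]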
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
_A : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
_A : Dict = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
_A : Union[str, Any] = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_A : List[str] = None
_A : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_A : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
_A : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_A : Optional[Any] = IFInpaintingPipeline(**pipe_a.components )
_A : int = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
_start_torch_memory_measurement()
_A : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A : Dict = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
_A : int = output.images[0]
assert image.shape == (64, 64, 3)
_A : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_A : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
_A : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
_A : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
_A : List[str] = output.images[0]
assert image.shape == (256, 256, 3)
_A : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_A : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    def _test_if_imgaimg(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
_start_torch_memory_measurement()
_A : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
_A : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
_A : Dict = output.images[0]
assert image.shape == (64, 64, 3)
_A : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_A : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
_A : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A : List[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
_A : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
_A : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
_A : List[Any] = output.images[0]
assert image.shape == (256, 256, 3)
_A : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_A : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
_start_torch_memory_measurement()
_A : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
_A : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
_A : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A : List[str] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
_A : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
_A : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_A : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
_A : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
_A : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
_A : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
_A : int = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
_A : Dict = output.images[0]
assert image.shape == (256, 256, 3)
_A : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_A : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowerCAmelCase_ ( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 26 |
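The _start_torch_memory_measurement helper above resets CUDA counters so each stage's peak can be read back with torch.cuda.max_memory_allocated(); a minimal sketch of that measurement pattern (illustrative helper name, CUDA assumed available):
import torch
def measure_peak_memory(fn):
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    result = fn()
    # peak bytes allocated since the counters were reset
    return result, torch.cuda.max_memory_allocated()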
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Fast polynomial multiplication using a radix-2 FFT over complex roots of unity."""
    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists, low-order first
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))
        # The product
        self.product = self.__multiply()
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2 )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root) )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    def __str__(self) -> str:
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for coef, i in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for coef, i in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for coef, i in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 0 |
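A useful sanity check for the FFT multiplier above is the direct O(n^2) convolution; a tiny reference implementation with a worked product (illustrative helper, coefficients low-order to high-order):
def naive_poly_multiply(a, b):
    out = [0] * (len(a) + len(b) - 1)
    for i, x in enumerate(a):
        for j, y in enumerate(b):
            out[i + j] += x * y
    return out
# (1 + 2x + 3x^2) * (3 + 4x) = 3 + 10x + 17x^2 + 12x^3
assert naive_poly_multiply([1, 2, 3], [3, 4]) == [3, 10, 17, 12]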
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
def _snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : int=0 , **SCREAMING_SNAKE_CASE_ : Dict ) -> List[str]:
'''simple docstring'''
A: List[str] = dict(self.forward_default_kwargs )
A: Union[str, Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE_ )
A: int = self.dummy_sample
A: Optional[int] = 0.1 * sample
A: List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A: List[str] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
A: Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals
A: int = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE_ )
A: Optional[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals
A: Union[str, Any] = dummy_past_residuals[:]
A: Any = scheduler.step_prk(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
A: Union[str, Any] = new_scheduler.step_prk(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
A: Union[str, Any] = scheduler.step_plms(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
A: Tuple = new_scheduler.step_plms(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
pass
def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any]=0 , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A: Union[str, Any] = dict(self.forward_default_kwargs )
A: Optional[Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE_ )
A: Optional[Any] = self.dummy_sample
A: Tuple = 0.1 * sample
A: List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A: Union[str, Any] = self.get_scheduler_config()
A: Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals (must be after setting timesteps)
A: str = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE_ )
A: List[str] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residual (must be after setting timesteps)
A: str = dummy_past_residuals[:]
A: str = scheduler.step_prk(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
A: Any = new_scheduler.step_prk(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
A: int = scheduler.step_plms(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
A: Union[str, Any] = new_scheduler.step_plms(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self : List[Any] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
A: List[Any] = self.scheduler_classes[0]
A: Any = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
A: List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
A: List[str] = 10
A: str = self.dummy_model()
A: str = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
for i, t in enumerate(scheduler.prk_timesteps ):
A: Any = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: str = scheduler.step_prk(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
A: Dict = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: Dict = scheduler.step_plms(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
return sample
def _snake_case ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
A: List[str] = dict(self.forward_default_kwargs )
A: Optional[int] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE_ )
for scheduler_class in self.scheduler_classes:
A: Optional[int] = self.get_scheduler_config()
A: List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
A: List[str] = self.dummy_sample
A: Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE_ , '''set_timesteps''' ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE_ , '''set_timesteps''' ):
A: Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A: Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A: Dict = dummy_past_residuals[:]
A: List[Any] = scheduler.step_prk(SCREAMING_SNAKE_CASE_ , 0 , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
A: Union[str, Any] = scheduler.step_prk(SCREAMING_SNAKE_CASE_ , 1 , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A: Optional[int] = scheduler.step_plms(SCREAMING_SNAKE_CASE_ , 0 , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
A: Dict = scheduler.step_plms(SCREAMING_SNAKE_CASE_ , 1 , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _snake_case ( self : Any ) -> Dict:
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE_ )
A: Optional[int] = self.scheduler_classes[0]
A: List[str] = self.get_scheduler_config(steps_offset=1 )
A: int = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )
def _snake_case ( self : Any ) -> List[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] ) -> List[str]:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[Any] ) -> List[str]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : int ) -> str:
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[Any] ) -> Any:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
A: Union[str, Any] = 27
for scheduler_class in self.scheduler_classes:
A: int = self.dummy_sample
A: List[Any] = 0.1 * sample
A: str = self.get_scheduler_config()
A: List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
A: str = scheduler.step_prk(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
def _snake_case ( self : int ) -> str:
'''simple docstring'''
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
A: List[str] = self.scheduler_classes[0]
A: Tuple = self.get_scheduler_config()
A: List[str] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def _snake_case ( self : int ) -> Tuple:
'''simple docstring'''
A: Optional[int] = self.full_loop()
A: int = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
A: Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 198.1318 ) < 1E-2
assert abs(result_mean.item() - 0.2580 ) < 1E-3
def _snake_case ( self : Dict ) -> Tuple:
'''simple docstring'''
A: List[str] = self.full_loop(prediction_type='''v_prediction''' )
A: List[str] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
A: str = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 67.3986 ) < 1E-2
assert abs(result_mean.item() - 0.0878 ) < 1E-3
def _snake_case ( self : List[str] ) -> List[str]:
'''simple docstring'''
A: str = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
A: str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
A: Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 230.0399 ) < 1E-2
assert abs(result_mean.item() - 0.2995 ) < 1E-3
def _snake_case ( self : Union[str, Any] ) -> int:
'''simple docstring'''
A: int = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
A: List[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
A: int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 186.9482 ) < 1E-2
assert abs(result_mean.item() - 0.2434 ) < 1E-3
| 369 |
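The recurring assertion in these tests, factored out as a sketch: two scheduler outputs count as identical when the summed absolute difference stays below a small tolerance (illustrative helper name):
import torch
def assert_outputs_identical(output, new_output, tol=1e-5):
    assert torch.sum(torch.abs(output - new_output)) < tol, "Scheduler outputs are not identical"
assert_outputs_identical(torch.ones(3), torch.ones(3))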
'''simple docstring'''
class FlowNetwork:
'''simple docstring'''
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
'''simple docstring'''
A: Tuple = None
A: Dict = None
A: Optional[int] = graph
self._normalize_graph(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: str = len(SCREAMING_SNAKE_CASE_ )
A: Optional[Any] = None
def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict ) -> str:
'''simple docstring'''
if sources is int:
A: Union[str, Any] = [sources]
if sinks is int:
A: Tuple = [sinks]
if len(SCREAMING_SNAKE_CASE_ ) == 0 or len(SCREAMING_SNAKE_CASE_ ) == 0:
return
A: List[str] = sources[0]
A: Optional[int] = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(SCREAMING_SNAKE_CASE_ ) > 1 or len(SCREAMING_SNAKE_CASE_ ) > 1:
A: Any = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
A: Dict = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
A: Optional[Any] = max_input_flow
A: Optional[Any] = 0
A: str = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
A: Optional[Any] = max_input_flow
A: str = size - 1
def _snake_case ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception('''You need to set maximum flow algorithm before.''' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
A: Optional[Any] = algorithm(self )
class FlowNetworkAlgorithmExecutor:
'''simple docstring'''
def __init__( self : int , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
A: str = flow_network
A: List[str] = flow_network.verticesCount
A: Dict = flow_network.sourceIndex
A: Any = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
A: str = flow_network.graph
A: str = False
def _snake_case ( self : int ) -> Union[str, Any]:
'''simple docstring'''
if not self.executed:
self._algorithm()
A: str = True
def _snake_case ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
'''simple docstring'''
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Optional[int]:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE_ )
# use this to save your result
A: Any = -1
def _snake_case ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
if not self.executed:
raise Exception('''You should execute algorithm before using its result!''' )
return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
'''simple docstring'''
def __init__( self : int , SCREAMING_SNAKE_CASE_ : Any ) -> Optional[int]:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE_ )
A: Optional[int] = [[0] * self.verticies_count for i in range(self.verticies_count )]
A: Any = [0] * self.verticies_count
A: Optional[Any] = [0] * self.verticies_count
def _snake_case ( self : str ) -> Optional[Any]:
'''simple docstring'''
A: Any = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
A: str = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
A: Dict = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
A: Any = vertices_list[i]
A: str = self.heights[vertex_index]
self.process_vertex(SCREAMING_SNAKE_CASE_ )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE_ ) )
A: Tuple = 0
else:
i += 1
A: Tuple = sum(self.preflow[self.source_index] )
def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : List[str] ) -> str:
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.relabel(SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> int:
'''simple docstring'''
A: Optional[int] = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ) -> int:
'''simple docstring'''
A: Optional[Any] = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
A: List[Any] = self.heights[to_index]
if min_height is not None:
A: int = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(f'maximum flow is {maximum_flow}')
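    # Sanity check (derived from the graph above, not from a recorded run): the only
    # augmenting path from source 0 to sink 3 is 0 -> 1 -> 2 -> 3, whose bottleneck
    # is the capacity-6 edge 1 -> 2, so this should print "maximum flow is 6".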
| 334 | 0 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _lowercase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowercase = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase = (
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
def SCREAMING_SNAKE_CASE__ ( self : int , snake_case : Optional[int] , snake_case : int , snake_case : Tuple=False ) -> Any:
"""simple docstring"""
UpperCamelCase_ : Tuple = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class in get_values(snake_case ):
                UpperCamelCase_ : Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class _lowercase ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Dict , snake_case : Tuple , snake_case : Dict=1_3 , snake_case : Optional[int]=7 , snake_case : str=True , snake_case : List[Any]=True , snake_case : Dict=True , snake_case : int=True , snake_case : List[str]=9_9 , snake_case : List[str]=3_2 , snake_case : str=3_2 , snake_case : List[Any]=2 , snake_case : int=4 , snake_case : Any=3_7 , snake_case : List[str]="gelu" , snake_case : List[str]=0.1 , snake_case : Union[str, Any]=0.1 , snake_case : Optional[int]=5_1_2 , snake_case : List[str]=1_6 , snake_case : Any=2 , snake_case : Optional[int]=0.02 , snake_case : Tuple=3 , snake_case : int=4 , snake_case : Tuple=None , ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Dict = parent
UpperCamelCase_ : str = batch_size
UpperCamelCase_ : int = seq_length
UpperCamelCase_ : Union[str, Any] = is_training
UpperCamelCase_ : Tuple = use_input_mask
UpperCamelCase_ : Union[str, Any] = use_token_type_ids
UpperCamelCase_ : Tuple = use_labels
UpperCamelCase_ : List[Any] = vocab_size
UpperCamelCase_ : Tuple = hidden_size
UpperCamelCase_ : Optional[Any] = num_hidden_layers
UpperCamelCase_ : List[str] = num_attention_heads
UpperCamelCase_ : Dict = intermediate_size
UpperCamelCase_ : Union[str, Any] = hidden_act
UpperCamelCase_ : int = hidden_dropout_prob
UpperCamelCase_ : str = attention_probs_dropout_prob
UpperCamelCase_ : Optional[Any] = max_position_embeddings
UpperCamelCase_ : Optional[int] = type_vocab_size
UpperCamelCase_ : Dict = type_sequence_label_size
UpperCamelCase_ : Union[str, Any] = initializer_range
UpperCamelCase_ : int = num_labels
UpperCamelCase_ : Union[str, Any] = num_choices
UpperCamelCase_ : Optional[int] = scope
UpperCamelCase_ : List[str] = embedding_size
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ : Union[str, Any] = None
if self.use_input_mask:
UpperCamelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ : int = None
if self.use_token_type_ids:
UpperCamelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase_ : Any = None
UpperCamelCase_ : Union[str, Any] = None
UpperCamelCase_ : Any = None
if self.use_labels:
UpperCamelCase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase_ : Dict = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : int , snake_case : Optional[Any] , snake_case : Union[str, Any] , snake_case : str , snake_case : Any , snake_case : Optional[int] , snake_case : int ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : int = TFMobileBertModel(config=snake_case )
UpperCamelCase_ : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCamelCase_ : int = model(snake_case )
UpperCamelCase_ : Optional[int] = [input_ids, input_mask]
UpperCamelCase_ : Tuple = model(snake_case )
UpperCamelCase_ : List[str] = model(snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case : Dict , snake_case : str , snake_case : Dict , snake_case : int , snake_case : List[Any] , snake_case : Union[str, Any] , snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ : List[str] = TFMobileBertForMaskedLM(config=snake_case )
UpperCamelCase_ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCamelCase_ : Union[str, Any] = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : Dict , snake_case : Dict , snake_case : List[str] , snake_case : List[Any] , snake_case : List[Any] , snake_case : List[str] , snake_case : Tuple ) -> int:
"""simple docstring"""
UpperCamelCase_ : str = TFMobileBertForNextSentencePrediction(config=snake_case )
UpperCamelCase_ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCamelCase_ : int = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : str , snake_case : Union[str, Any] , snake_case : Dict , snake_case : Dict , snake_case : Any , snake_case : Union[str, Any] , snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : Any = TFMobileBertForPreTraining(config=snake_case )
UpperCamelCase_ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCamelCase_ : Union[str, Any] = model(snake_case )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : Optional[int] , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : List[str] , snake_case : Tuple , snake_case : List[Any] , snake_case : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = self.num_labels
UpperCamelCase_ : Dict = TFMobileBertForSequenceClassification(config=snake_case )
UpperCamelCase_ : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCamelCase_ : Any = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case : int , snake_case : Optional[int] , snake_case : List[Any] , snake_case : Any , snake_case : Dict , snake_case : Tuple , snake_case : List[str] ) -> Any:
"""simple docstring"""
UpperCamelCase_ : List[str] = self.num_choices
UpperCamelCase_ : List[Any] = TFMobileBertForMultipleChoice(config=snake_case )
UpperCamelCase_ : Dict = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase_ : Optional[Any] = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase_ : List[str] = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase_ : str = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
UpperCamelCase_ : Tuple = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Any , snake_case : Dict , snake_case : Dict , snake_case : Optional[Any] , snake_case : Optional[int] ) -> str:
"""simple docstring"""
UpperCamelCase_ : List[Any] = self.num_labels
UpperCamelCase_ : str = TFMobileBertForTokenClassification(config=snake_case )
UpperCamelCase_ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCamelCase_ : Optional[Any] = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : Tuple , snake_case : Optional[Any] , snake_case : Tuple , snake_case : int , snake_case : Optional[int] , snake_case : List[str] , snake_case : Any ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : Any = TFMobileBertForQuestionAnswering(config=snake_case )
UpperCamelCase_ : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCamelCase_ : Union[str, Any] = model(snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCamelCase_ : Tuple = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
UpperCamelCase_ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : Any = TFMobileBertModelTest.TFMobileBertModelTester(self )
UpperCamelCase_ : Optional[int] = ConfigTester(self , config_class=snake_case , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
"""simple docstring"""
UpperCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["google/mobilebert-uncased"]:
UpperCamelCase_ : List[Any] = TFMobileBertModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_tf
class _lowercase ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str:
"""simple docstring"""
UpperCamelCase_ : Any = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' )
UpperCamelCase_ : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase_ : Union[str, Any] = model(snake_case )[0]
UpperCamelCase_ : List[str] = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , snake_case )
UpperCamelCase_ : Optional[Any] = tf.constant(
[
[
[-4.5919547, -9.248295, -9.645256],
[-6.7306175, -6.440284, -6.6052837],
[-7.2743506, -6.7847915, -6.024673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case , atol=1e-4 )
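# Usage sketch for the checkpoint exercised above (real transformers/TF API; kept
# outside the test classes, so it is illustrative rather than part of the suite):
#
#   model = TFMobileBertModel.from_pretrained("google/mobilebert-uncased")
#   outputs = model(tf.constant([[0, 1, 2, 3, 4, 5]]))
#   outputs.last_hidden_state.shape  # (1, 6, hidden_size)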
| 175 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class snake_case ( PretrainedConfig ):
'''simple docstring'''
snake_case_ : Dict = """xlm-roberta"""
def __init__( self : Any , lowerCAmelCase : Tuple=3_0522 , lowerCAmelCase : Tuple=768 , lowerCAmelCase : Any=12 , lowerCAmelCase : str=12 , lowerCAmelCase : Any=3072 , lowerCAmelCase : int="gelu" , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : List[str]=512 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : Tuple=0.02 , lowerCAmelCase : int=1E-12 , lowerCAmelCase : Optional[Any]=1 , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Any=2 , lowerCAmelCase : int="absolute" , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Dict=None , **lowerCAmelCase : Any , ) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase)
_snake_case : List[Any] = vocab_size
_snake_case : Optional[Any] = hidden_size
_snake_case : Optional[Any] = num_hidden_layers
_snake_case : Union[str, Any] = num_attention_heads
_snake_case : List[Any] = hidden_act
_snake_case : Tuple = intermediate_size
_snake_case : Any = hidden_dropout_prob
_snake_case : List[str] = attention_probs_dropout_prob
_snake_case : List[Any] = max_position_embeddings
_snake_case : List[str] = type_vocab_size
_snake_case : Optional[int] = initializer_range
_snake_case : int = layer_norm_eps
_snake_case : Optional[Any] = position_embedding_type
_snake_case : Tuple = use_cache
_snake_case : Optional[Any] = classifier_dropout
class snake_case ( OnnxConfig ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : Dict) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_snake_case : List[str] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_snake_case : Optional[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
])
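# Usage sketch (hedged: upstream these classes are XLMRobertaConfig and
# XLMRobertaOnnxConfig; with the defaults above a round trip looks like):
#
#   config = XLMRobertaConfig()              # vocab_size=30522, hidden_size=768, ...
#   onnx_config = XLMRobertaOnnxConfig(config, task="default")
#   dict(onnx_config.inputs)                 # {"input_ids": {0: "batch", 1: "sequence"}, ...}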
| 317 | 0 |
"""simple docstring"""
from maths.prime_factors import prime_factors
def a__ ( number ):
    """simple docstring"""
    if not isinstance(number , int ):
        UpperCamelCase = F"Input value of [number={number}] must be an integer"
        raise TypeError(UpperCamelCase )
    if number < 1:
        raise ValueError("Input must be a positive integer" )
    return -1 if len(prime_factors(number ) ) % 2 else 1
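# Worked examples, assuming maths.prime_factors returns factors with multiplicity
# (this then computes the Liouville-style sign (-1)**Omega(n)):
#   a__(4)  ->  1   # 4 = 2 * 2, an even number of prime factors
#   a__(12) -> -1   # 12 = 2 * 2 * 3, an odd number of prime factors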
if __name__ == "__main__":
import doctest
doctest.testmod()
| 244 |
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''T''')
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return (position - 1) // 2
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return (2 * position) + 1
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return (2 * position) + 2
class _lowerCamelCase ( Generic[T] ):
def __init__(self ) -> None:
UpperCamelCase = []
UpperCamelCase = {}
UpperCamelCase = 0
def __len__(self ) -> int:
return self.elements
def __repr__(self ) -> str:
return str(self.heap )
def snake_case_ (self ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def snake_case_ (self , __a , __a ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
UpperCamelCase = self.elements
self.elements += 1
self._bubble_up(__a )
def snake_case_ (self ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
UpperCamelCase , UpperCamelCase = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
UpperCamelCase , UpperCamelCase = self.heap[0]
self._bubble_down(__a )
return elem
def snake_case_ (self , __a , __a ) -> None:
# Update the weight of the given key
UpperCamelCase = self.position_map[elem]
UpperCamelCase = (elem, weight)
if position > 0:
UpperCamelCase = get_parent_position(__a )
UpperCamelCase , UpperCamelCase = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(__a )
else:
self._bubble_down(__a )
else:
self._bubble_down(__a )
def snake_case_ (self , __a ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
UpperCamelCase = self.position_map[elem]
if curr_pos == 0:
return None
UpperCamelCase = get_parent_position(__a )
UpperCamelCase , UpperCamelCase = self.heap[curr_pos]
UpperCamelCase , UpperCamelCase = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__a , __a )
return self._bubble_up(__a )
return None
def snake_case_ (self , __a ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
UpperCamelCase = self.position_map[elem]
UpperCamelCase , UpperCamelCase = self.heap[curr_pos]
UpperCamelCase = get_child_left_position(__a )
UpperCamelCase = get_child_right_position(__a )
if child_left_position < self.elements and child_right_position < self.elements:
UpperCamelCase , UpperCamelCase = self.heap[child_left_position]
UpperCamelCase , UpperCamelCase = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__a , __a )
return self._bubble_down(__a )
if child_left_position < self.elements:
UpperCamelCase , UpperCamelCase = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__a , __a )
return self._bubble_down(__a )
else:
return None
if child_right_position < self.elements:
UpperCamelCase , UpperCamelCase = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__a , __a )
return self._bubble_down(__a )
return None
def snake_case_ (self , __a , __a ) -> None:
# Swap the nodes at the given positions
UpperCamelCase = self.heap[nodea_pos][0]
UpperCamelCase = self.heap[nodea_pos][0]
UpperCamelCase , UpperCamelCase = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
UpperCamelCase = nodea_pos
UpperCamelCase = nodea_pos
class _lowerCamelCase ( Generic[T] ):
def __init__(self ) -> None:
UpperCamelCase = {}
UpperCamelCase = 0
def __repr__(self ) -> str:
return str(self.connections )
def __len__(self ) -> int:
return self.nodes
def snake_case_ (self , __a ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
UpperCamelCase = {}
self.nodes += 1
def snake_case_ (self , __a , __a , __a ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(__a )
self.add_node(__a )
UpperCamelCase = weight
UpperCamelCase = weight
def a__ ( _SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase = {node: maxsize for node in graph.connections}
UpperCamelCase = {node: None for node in graph.connections}
UpperCamelCase = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if priority_queue.is_empty():
return dist, parent
# initialization
UpperCamelCase = priority_queue.extract_min()
UpperCamelCase = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
UpperCamelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_SCREAMING_SNAKE_CASE , dist[neighbour] )
UpperCamelCase = node
# running prim's algorithm
while not priority_queue.is_empty():
UpperCamelCase = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
UpperCamelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_SCREAMING_SNAKE_CASE , dist[neighbour] )
UpperCamelCase = node
return dist, parent
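# Usage sketch (hedged: with the obfuscated names above, upstream this is
# GraphUndirectedWeighted.add_edge plus prims_algo):
#
#   graph = GraphUndirectedWeighted()
#   graph.add_edge("a", "b", 3)
#   graph.add_edge("b", "c", 10)
#   graph.add_edge("a", "c", 15)
#   dist, parent = prims_algo(graph)   # parent maps each node to its MST predecessor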
| 244 | 1 |
"""simple docstring"""
def odd_even_sort( input_list ) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
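# Example: odd_even_sort([5, 1, 4, 2, 8]) returns [1, 2, 4, 5, 8]; the list is
# also sorted in place, since the swaps mutate the input.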
if __name__ == "__main__":
print("""Enter list to be sorted""")
    input_list = [int(x) for x in input().split()]
    # inputting the elements of the list on one line
    sorted_list = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
| 171 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_A = logging.get_logger(__name__)
class lowerCamelCase ( BaseImageProcessor ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['pixel_values']
def __init__(self , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = True , _lowerCamelCase = 1 / 255 , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = True , **_lowerCamelCase , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"""shortest_edge""": 224}
UpperCAmelCase__ : List[Any] = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
UpperCAmelCase__ : str = crop_size if crop_size is not None else {"""height""": 256, """width""": 256}
UpperCAmelCase__ : str = get_size_dict(_lowerCamelCase , param_name="""crop_size""" )
UpperCAmelCase__ : int = do_resize
UpperCAmelCase__ : Any = size
UpperCAmelCase__ : int = resample
UpperCAmelCase__ : Union[str, Any] = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : str = do_center_crop
UpperCAmelCase__ : Dict = crop_size
UpperCAmelCase__ : List[str] = do_flip_channel_order
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PIL.Image.BILINEAR , _lowerCamelCase = None , **_lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
UpperCAmelCase__ : Union[str, Any] = get_resize_output_image_size(_lowerCamelCase , size=size["""shortest_edge"""] , default_to_square=_lowerCamelCase )
return resize(_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = get_size_dict(_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(_lowerCamelCase , size=(size["""height"""], size["""width"""]) , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
"""simple docstring"""
return rescale(_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase = None ):
"""simple docstring"""
return flip_channel_order(_lowerCamelCase , data_format=_lowerCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : Any = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : List[str] = resample if resample is not None else self.resample
UpperCAmelCase__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : int = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
UpperCAmelCase__ : List[str] = size if size is not None else self.size
UpperCAmelCase__ : Tuple = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : str = get_size_dict(_lowerCamelCase , param_name="""crop_size""" )
UpperCAmelCase__ : List[str] = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Union[str, Any] = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
UpperCAmelCase__ : Tuple = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images]
if do_center_crop:
UpperCAmelCase__ : Optional[Any] = [self.center_crop(image=_lowerCamelCase , size=_lowerCamelCase ) for image in images]
if do_rescale:
UpperCAmelCase__ : List[Any] = [self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
UpperCAmelCase__ : Any = [self.flip_channel_order(image=_lowerCamelCase ) for image in images]
UpperCAmelCase__ : int = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images]
UpperCAmelCase__ : Optional[Any] = {"""pixel_values""": images}
return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase = None ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(_lowerCamelCase ):
UpperCAmelCase__ : Optional[int] = target_sizes.numpy()
UpperCAmelCase__ : Tuple = []
for idx in range(len(_lowerCamelCase ) ):
UpperCAmelCase__ : Union[str, Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=_lowerCamelCase )
UpperCAmelCase__ : str = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowerCamelCase )
else:
UpperCAmelCase__ : str = logits.argmax(dim=1 )
UpperCAmelCase__ : int = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
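# Usage sketch (hedged: upstream this is transformers' MobileViTImageProcessor, whose
# channel flip reflects checkpoints trained on BGR inputs):
#
#   processor = MobileViTImageProcessor()
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape              # (1, 3, 256, 256) with the defaults above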
| 171 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( ProcessorMixin ):
lowerCAmelCase__ = ["""image_processor""", """tokenizer"""]
lowerCAmelCase__ = """BlipImageProcessor"""
lowerCAmelCase__ = """AutoTokenizer"""
def __init__(self :List[str] , _UpperCamelCase :Union[str, Any] , _UpperCamelCase :Dict )-> Dict:
__A = False
super().__init__(_UpperCamelCase , _UpperCamelCase )
__A = self.image_processor
def __call__(self :int , _UpperCamelCase :ImageInput = None , _UpperCamelCase :Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _UpperCamelCase :bool = True , _UpperCamelCase :Union[bool, str, PaddingStrategy] = False , _UpperCamelCase :Union[bool, str, TruncationStrategy] = None , _UpperCamelCase :Optional[int] = None , _UpperCamelCase :int = 0 , _UpperCamelCase :Optional[int] = None , _UpperCamelCase :Optional[bool] = None , _UpperCamelCase :bool = False , _UpperCamelCase :bool = False , _UpperCamelCase :bool = False , _UpperCamelCase :bool = False , _UpperCamelCase :bool = False , _UpperCamelCase :bool = True , _UpperCamelCase :Optional[Union[str, TensorType]] = None , **_UpperCamelCase :List[Any] , )-> BatchEncoding:
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
__A = self.tokenizer
__A = self.tokenizer(
text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
return text_encoding
# add pixel_values
__A = self.image_processor(_UpperCamelCase , return_tensors=_UpperCamelCase )
if text is not None:
__A = self.tokenizer(
text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
else:
__A = None
if text_encoding is not None:
encoding_image_processor.update(_UpperCamelCase )
return encoding_image_processor
def _lowerCAmelCase (self :Any , *_UpperCamelCase :Tuple , **_UpperCamelCase :Any )-> Union[str, Any]:
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase )
def _lowerCAmelCase (self :Union[str, Any] , *_UpperCamelCase :Union[str, Any] , **_UpperCamelCase :List[str] )-> int:
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _lowerCAmelCase (self :int )-> Dict:
__A = self.tokenizer.model_input_names
__A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
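# Usage sketch (hedged: this mirrors the BLIP-2-style processors in transformers,
# which pair a BlipImageProcessor with an AutoTokenizer):
#
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")
#   # -> pixel_values plus input_ids / attention_mask from the tokenizer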
| 250 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
snake_case__ : int = logging.get_logger(__name__)
snake_case__ : List[Any] = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
snake_case__ : List[Any] = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
snake_case__ : Optional[Any] = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
snake_case__ : Optional[Any] = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
snake_case__ : Tuple = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
snake_case__ : Optional[int] = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
snake_case__ : Tuple = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
snake_case__ : Tuple = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
snake_case__ : List[Any] = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
snake_case__ : List[str] = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
snake_case__ : Tuple = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
snake_case__ : Dict = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
snake_case__ : Union[str, Any] = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
snake_case__ : Optional[int] = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
snake_case__ : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
snake_case__ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
snake_case__ : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
snake_case__ : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
snake_case__ : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
snake_case__ : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
snake_case__ : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
snake_case__ : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
snake_case__ : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
snake_case__ : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
snake_case__ : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
snake_case__ : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
snake_case__ : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
snake_case__ : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_MAPPING
snake_case__ : List[Any] = auto_class_update(FlaxAutoModel)
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING
snake_case__ : Any = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
snake_case__ : Any = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING
snake_case__ : Any = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
snake_case__ : List[Any] = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
snake_case__ : int = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
snake_case__ : int = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
snake_case__ : Dict = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
snake_case__ : Any = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
snake_case__ : Union[str, Any] = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
snake_case__ : List[str] = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
snake_case__ : Optional[int] = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
snake_case__ : Optional[Any] = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
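# Usage sketch (real transformers API): the auto classes defined above are used via
# from_pretrained, which dispatches on the checkpoint's config type:
#
#   from transformers import FlaxAutoModel
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")   # -> FlaxBertModel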
| 250 | 1 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
lowerCamelCase__ = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __A ( self : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = AudioClassificationPipeline(model=__magic_name__ , feature_extractor=__magic_name__ )
# test with a raw waveform
SCREAMING_SNAKE_CASE_ = np.zeros((34_000,) )
SCREAMING_SNAKE_CASE_ = np.zeros((14_000,) )
return audio_classifier, [audioa, audio]
def __A ( self : Any , __magic_name__ : Dict , __magic_name__ : List[str] ) -> int:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = examples
SCREAMING_SNAKE_CASE_ = audio_classifier(__magic_name__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
__magic_name__ , [
{"score": ANY(__magic_name__ ), "label": ANY(__magic_name__ )},
{"score": ANY(__magic_name__ ), "label": ANY(__magic_name__ )},
] , )
SCREAMING_SNAKE_CASE_ = audio_classifier(__magic_name__ , top_k=1 )
self.assertEqual(
__magic_name__ , [
{"score": ANY(__magic_name__ ), "label": ANY(__magic_name__ )},
] , )
self.run_torchaudio(__magic_name__ )
@require_torchaudio
def __A ( self : Tuple , __magic_name__ : Tuple ) -> Optional[Any]:
import datasets
# test with a local file
SCREAMING_SNAKE_CASE_ = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
SCREAMING_SNAKE_CASE_ = dataset[0]["audio"]["array"]
SCREAMING_SNAKE_CASE_ = audio_classifier(__magic_name__ )
self.assertEqual(
__magic_name__ , [
{"score": ANY(__magic_name__ ), "label": ANY(__magic_name__ )},
{"score": ANY(__magic_name__ ), "label": ANY(__magic_name__ )},
] , )
@require_torch
def __A ( self : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = "anton-l/wav2vec2-random-tiny-classifier"
SCREAMING_SNAKE_CASE_ = pipeline("audio-classification" , model=__magic_name__ )
SCREAMING_SNAKE_CASE_ = np.ones((8_000,) )
SCREAMING_SNAKE_CASE_ = audio_classifier(__magic_name__ , top_k=4 )
SCREAMING_SNAKE_CASE_ = [
{"score": 0.0842, "label": "no"},
{"score": 0.0838, "label": "up"},
{"score": 0.0837, "label": "go"},
{"score": 0.0834, "label": "right"},
]
SCREAMING_SNAKE_CASE_ = [
{"score": 0.0845, "label": "stop"},
{"score": 0.0844, "label": "on"},
{"score": 0.0841, "label": "right"},
{"score": 0.0834, "label": "left"},
]
self.assertIn(nested_simplify(__magic_name__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
SCREAMING_SNAKE_CASE_ = {"array": np.ones((8_000,) ), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
SCREAMING_SNAKE_CASE_ = audio_classifier(__magic_name__ , top_k=4 )
self.assertIn(nested_simplify(__magic_name__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __A ( self : Optional[Any] ) -> Optional[int]:
import datasets
SCREAMING_SNAKE_CASE_ = "superb/wav2vec2-base-superb-ks"
SCREAMING_SNAKE_CASE_ = pipeline("audio-classification" , model=__magic_name__ )
SCREAMING_SNAKE_CASE_ = datasets.load_dataset("anton-l/superb_dummy" , "ks" , split="test" )
        SCREAMING_SNAKE_CASE_ = np.array(dataset[3]["speech"] , dtype=np.float32 )
SCREAMING_SNAKE_CASE_ = audio_classifier(__magic_name__ , top_k=4 )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=3 ) , [
{"score": 0.981, "label": "go"},
{"score": 0.007, "label": "up"},
{"score": 0.006, "label": "_unknown_"},
{"score": 0.001, "label": "down"},
] , )
@require_tf
@unittest.skip("Audio classification is not implemented for TF" )
def __A ( self : str ) -> Dict:
pass
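# Usage sketch (real transformers pipeline API, matching the slow test above; raw
# waveforms are assumed to already be at the model's sampling rate):
#
#   classifier = pipeline("audio-classification", model="superb/wav2vec2-base-superb-ks")
#   classifier(np.zeros(16_000, dtype=np.float32), top_k=2)
#   # -> [{"score": ..., "label": ...}, {"score": ..., "label": ...}]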
| 118 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A : str = logging.get_logger(__name__)
def a__ ( __UpperCamelCase , __UpperCamelCase=False ):
SCREAMING_SNAKE_CASE_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
SCREAMING_SNAKE_CASE_ = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE_ = ""
else:
SCREAMING_SNAKE_CASE_ = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE_ = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE_ = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE_ = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE_ = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE_ = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE_ = in_proj_bias[-config.hidden_size :]
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = dct.pop(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = val
def a__ ( ):
SCREAMING_SNAKE_CASE_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE_ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor; scale by 256/224 to
    # maintain the same ratio w.r.t. 224 images, see
    # https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    size = int((256 / 224) * config.image_size)
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
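# Illustrative invocation (the script file name below is a placeholder, not
# taken from this file):
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224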
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 118 | 1 |
def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which two equal-length strings differ.

    Illustrative doctests (picked up by the doctest run below):
    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 354 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262])
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1 = output.rgb[0, -3:, -3:, -1]
        depth_slice_1 = output.depth[0, -3:, -1]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = ldm3d_pipe.tokenizer(
            prompt, padding="max_length", max_length=ldm3d_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2 = output.rgb[0, -3:, -3:, -1]
        depth_slice_2 = output.depth[0, -3:, -1]
        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217])
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        # note: the reference values below were generated from the rgb output
        depth_slice = rgb[0, -3:, -1].flatten()
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706])
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706])
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_4c(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
| 65 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig

NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}

class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(self, vocab_size=21128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, max_relative_position=64, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
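# Quick sanity check of the defaults above (illustrative, not part of the
# original module):
#   config = NezhaConfig()
#   assert config.max_relative_position == 64 and config.use_cache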
| 144 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )

if __name__ == "__main__":
    main()
| 97 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 182 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
| 182 | 1 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs are passed straight through to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
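# Illustrative CLI call through the fire.Fire hook below (file names are
# placeholders):
#   python rouge_cli.py preds.txt targets.txt --save_path rouge.json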
if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
| 227 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the gross price given a net price and a tax rate."""
    return price * (1 + tax_rate)

if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
| 308 | 0 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")
class LRUCache(Generic[T]):
    """Least Recently Used (LRU) cache over hashable keys."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            self._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == self._MAX_CAPACITY:
                # evict the least recently used key, not the referred one
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"

if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 361 |
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def bamb(x):
    # convert bytes to mebibytes
    return int(x / 2**20)
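# e.g. bamb(3 * 2**20) == 3; used below to report CUDA memory counters in MB.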
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"})

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps)
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(tracemalloc.peaked + bamb(tracemalloc.begin)))
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--peak_memory_upper_bound", type=float, default=None, help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.", )
    parser.add_argument(
        "--n_train", type=int, default=320, help="Number of training examples to use.", )
    parser.add_argument(
        "--n_val", type=int, default=160, help="Number of validation examples to use.", )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of train epochs.", )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
| 168 | 0 |
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law V = I * R: pass exactly one quantity as 0 and the other
    two as known values; the missing quantity is returned.

    Illustrative doctests (picked up by the doctest run below):
    >>> ohms_law(voltage=10, current=0, resistance=5)
    {'current': 2.0}
    >>> ohms_law(voltage=0, current=0, resistance=10)
    Traceback (most recent call last):
        ...
    ValueError: One and only one argument must be 0
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 21 |
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print("Symbol".center(8), "Stack".center(print_width), "Postfix".center(print_width), sep=" | ")
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                # the stack[-1] != "(" guard keeps the priority lookup from
                # failing on an open parenthesis
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(x.center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ")  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(" ".center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ")  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
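# Worked example (illustrative): for the infix expression "a+b*(c^d-e)",
# infix_2_postfix yields "abcd^e-*+" and infix_2_prefix yields "+a*b-^cde".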
if __name__ == "__main__":
__UpperCAmelCase = input("\nEnter an Infix Equation = ") # Input an Infix equation
__UpperCAmelCase = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 299 | 0 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]

def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()

def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()

@contextmanager
def hidden_cursor():
    # hide the terminal cursor for the duration of the with-block, restoring it
    # even if the block raises
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
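# Minimal usage sketch of the context manager above (illustrative):
#   with hidden_cursor():
#       input("the cursor stays hidden while this prompt waits ")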
| 278 |
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    """Encodes data according to RFC4648."""
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    """Decodes data according to RFC4648."""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
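# Round-trip sanity check (illustrative): base64_encode(b"Hello World!") yields
# b"SGVsbG8gV29ybGQh", and base64_decode("SGVsbG8gV29ybGQh") recovers the bytes.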
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 278 | 1 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()

def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f

def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)

class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with little data, distributed training needs more epochs to reach the score of 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
| 56 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
    from transformers import (
        TFBertModel,
        TFCLIPVisionModel,
        TFDeiTModel,
        TFRobertaModel,
        TFVisionTextDualEncoderModel,
        TFViTModel,
        VisionTextDualEncoderConfig,
    )

if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
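# For example, to_atuple(224) returns (224, 224), while an iterable such as
# (224, 224) passes through unchanged; the testers below rely on this to treat
# scalar and per-axis image/patch sizes uniformly.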
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )

    def assert_almost_equals(self, a, b, tol):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert")
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta")
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert")
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
@slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np")
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 92 | 0 |
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list:
    """Shuffle `data` in place by repeatedly swapping two randomly chosen positions."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 20 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["ViTFeatureExtractor"]
lowercase_ = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
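# The `_LazyModule` assignment above defers the heavy framework imports until an
# attribute is first accessed. A self-contained sketch of the same idea via the
# module-level __getattr__ hook from PEP 562 (a hypothetical toy module, shown as a
# comment so the real module's behavior is unchanged):
#
#     import importlib
#
#     _lazy_imports = {"sqrt": "math", "mean": "statistics"}
#
#     def __getattr__(name):
#         if name in _lazy_imports:
#             return getattr(importlib.import_module(_lazy_imports[name]), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")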
| 20 | 1 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
A__ : Optional[int] = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def UpperCAmelCase__ ( UpperCAmelCase_ : Union[str, Any]=None ) -> Optional[int]:
if subparsers is not None:
__lowerCamelCase : List[str] = subparsers.add_parser('tpu-config' , description=_description )
else:
__lowerCamelCase : List[Any] = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
__lowerCamelCase : Any = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=UpperCAmelCase_ , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=UpperCAmelCase_ , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
__lowerCamelCase : Dict = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=UpperCAmelCase_ , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=UpperCAmelCase_ )
return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone

    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
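# Hypothetical dry run (not part of the original command module): with --debug the
# launcher only prints the gcloud invocation it would execute. This assumes no
# accelerate config file is present on the machine, so `defaults` stays None.
if __name__ == "__main__":
    parser = tpu_command_parser()
    args = parser.parse_args(
        ["--tpu_name", "my-tpu", "--tpu_zone", "us-central1-a", "--command", "echo hello", "--debug"]
    )
    tpu_command_launcher(args)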
| 185 |
'''simple docstring'''
A__ : Optional[int] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def UpperCAmelCase__ ( UpperCAmelCase_ : bytes ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
__lowerCamelCase : Dict = F'a bytes-like object is required, not \'{data.__class__.__name__}\''
raise TypeError(UpperCAmelCase_ )
__lowerCamelCase : Optional[int] = ''.join(bin(UpperCAmelCase_ )[2:].zfill(8 ) for byte in data )
__lowerCamelCase : Any = len(UpperCAmelCase_ ) % 6 != 0
if padding_needed:
# The padding that will be added later
__lowerCamelCase : int = B'=' * ((6 - len(UpperCAmelCase_ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(UpperCAmelCase_ ) % 6)
else:
__lowerCamelCase : str = B''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(UpperCAmelCase_ ) , 6 ) ).encode()
+ padding
)
def UpperCAmelCase__ ( UpperCAmelCase_ : str ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
__lowerCamelCase : Dict = (
'argument should be a bytes-like object or ASCII string, '
F'not \'{encoded_data.__class__.__name__}\''
)
raise TypeError(UpperCAmelCase_ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
try:
__lowerCamelCase : int = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
__lowerCamelCase : Union[str, Any] = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(UpperCAmelCase_ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
__lowerCamelCase : Any = encoded_data[:-padding]
__lowerCamelCase : Optional[Any] = ''.join(
bin(B64_CHARSET.index(UpperCAmelCase_ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
__lowerCamelCase : Any = ''.join(
bin(B64_CHARSET.index(UpperCAmelCase_ ) )[2:].zfill(6 ) for char in encoded_data )
__lowerCamelCase : str = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(UpperCAmelCase_ ) , 8 )
]
return bytes(UpperCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
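    # Cross-check (hypothetical usage) of the codec above against the standard library.
    import base64 as _stdlib_base64

    _sample = b"Hello, World!"
    assert base64_encode(_sample) == _stdlib_base64.b64encode(_sample)  # b'SGVsbG8sIFdvcmxkIQ=='
    assert base64_decode(_stdlib_base64.b64encode(_sample).decode()) == _sample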
| 185 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
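# Standalone sketch (hypothetical, with assumed tiny hyperparameters and randomly
# initialized weights; requires a transformers version that ships Nezha): one
# forward pass outside unittest, mirroring what NezhaModelTester automates above.
def _tiny_nezha_forward():
    config = NezhaConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
    )
    model = NezhaModel(config).eval()
    input_ids = torch.randint(0, config.vocab_size, (1, 7))
    with torch.no_grad():
        output = model(input_ids)
    return output.last_hidden_state.shape  # torch.Size([1, 7, 32])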
| 320 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    r"""
    Constructs a CLIP image processor.
    """

    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
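# Hedged usage sketch (assumed synthetic input, not part of the original class): with
# the defaults above, an HWC uint8 image is resized, center-cropped to 224x224,
# rescaled, and normalized into a (1, 3, 224, 224) batch.
def _demo_preprocess():
    processor = CLIPImageProcessor()
    fake_image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
    batch = processor(images=fake_image, return_tensors="np")
    return batch["pixel_values"].shape  # (1, 3, 224, 224)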
| 320 | 1 |
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
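# Single-process smoke test (hypothetical addition): when not distributed, `gather`
# is the identity, so the round trip returns exactly the tensor created above.
if __name__ == "__main__":
    _state = PartialState()
    _tensor = create_tensor(_state)  # tensor([1.]) on a lone CPU process
    assert gather(_tensor).tolist() == _tensor.tolist()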
| 20 |
'''simple docstring'''
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for pass_num in range(arr_size):
        # Alternate the starting index between 0 and 1 on successive passes.
        for i in range(pass_num % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
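    # Property check (hypothetical addition): the transposition sort must agree
    # with Python's built-in sort on random data.
    import random

    data = [random.randint(-100, 100) for _ in range(50)]
    assert odd_even_transposition(data[:]) == sorted(data)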
| 139 | 0 |
"""simple docstring"""
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the string n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 149 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
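# Aside: the TYPE_CHECKING branch above imports names for static analysis only; at
# runtime those lookups go through the lazy module instead. A toy sketch of the same
# split (a hypothetical module, shown as a comment so this file's behavior is unchanged):
#
#     from typing import TYPE_CHECKING
#
#     if TYPE_CHECKING:
#         from decimal import Decimal  # visible to type checkers only
#
#     def to_decimal(value: str) -> "Decimal":
#         from decimal import Decimal  # real import happens on first call
#         return Decimal(value)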
| 149 | 1 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 231 |
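# A self-contained sketch of the expected-slice pattern the integration test
# above relies on: pin down only a small, stable corner of a large output
# tensor and compare it with an absolute tolerance. The tensor here is a
# stand-in, not a real model output.
import torch


def check_slice(output: torch.Tensor, expected: torch.Tensor, atol: float = 1e-4) -> bool:
    # Only a small sub-block is asserted on; the rest of the tensor is free to
    # drift across hardware and library versions.
    rows, cols = expected.shape
    return torch.allclose(output[0, :rows, :cols], expected, atol=atol)


fake_hidden_state = torch.arange(16, dtype=torch.float32).reshape(1, 4, 4)
assert check_slice(fake_hidden_state, torch.tensor([[0.0, 1.0], [4.0, 5.0]]))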
import requests
_A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def fetch_bbc_news(bbc_news_api_key: str) -> None:
    """simple docstring"""
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 231 | 1 |
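# Hedged usage sketch for the fetcher above: read the key from an environment
# variable (BBC_NEWS_API_KEY is an assumed name, not part of the snippet) and
# surface HTTP errors explicitly instead of failing later on .json().
import os

import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news_checked(api_key: str) -> None:
    response = requests.get(_NEWS_API + api_key, timeout=10)
    response.raise_for_status()  # raises for 4xx/5xx responses
    for i, article in enumerate(response.json()["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    key = os.environ.get("BBC_NEWS_API_KEY", "")
    if key:
        fetch_bbc_news_checked(key)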
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    '''simple docstring'''
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so reduce phi for all of its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
| 92 |
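# Hedged sanity check for the sieve above: for a small limit, the summed
# totients should match the direct definition phi(n) = #{k <= n : gcd(k, n) = 1}.
# (`solution` refers to the sieve version defined just above.)
from math import gcd


def _solution_naive(limit: int) -> int:
    return sum(sum(1 for k in range(1, n + 1) if gcd(k, n) == 1) for n in range(2, limit + 1))


assert solution(100) == _solution_naive(100)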
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    '''simple docstring'''
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F"{solution() = }")
| 92 | 1 |
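# The product formula behind the solution above, phi(n) = n * prod(1 - 1/p)
# over the distinct primes p dividing n, demonstrated for one n with trial
# division (a standalone sketch, independent of the sieve):
def phi_product(n: int) -> int:
    result = float(n)
    p, m = 2, n
    while p * p <= m:
        if m % p == 0:
            result *= 1 - 1 / p
            while m % p == 0:
                m //= p
        p += 1
    if m > 1:  # leftover prime factor
        result *= 1 - 1 / m
    return int(round(result))


assert phi_product(36) == 12  # 36 * (1 - 1/2) * (1 - 1/3)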
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float):
    """simple docstring"""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError("Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
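# Usage sketch (the zero-valued argument is the unknown being solved for):
#
#     >>> electric_power(voltage=0, current=2, power=5)
#     result(name='voltage', value=2.5)
#     >>> electric_power(voltage=2, current=2, power=0)
#     result(name='power', value=4.0)
#     >>> electric_power(voltage=-2, current=3, power=0)
#     result(name='power', value=6.0)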
if __name__ == "__main__":
import doctest
doctest.testmod()
| 147 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
'BridgeTower/bridgetower-base-itm-mlm': (
'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")
        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 180 | 0 |
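# A short hedged sketch of composing the configs above (the values shown are
# the defaults made explicit; `from_text_vision_configs` is the classmethod
# defined on BridgeTowerConfig):
text_config = BridgeTowerTextConfig(vocab_size=50_265, hidden_size=768)
vision_config = BridgeTowerVisionConfig(hidden_size=768, patch_size=16, image_size=288)
config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
assert config.to_dict()["model_type"] == "bridgetower"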
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2_048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, activation="relu"):
        '''simple docstring'''
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state) -> Tensor:
        '''simple docstring'''
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    def __init__(self, config):
        '''simple docstring'''
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values) -> Tensor:
        '''simple docstring'''
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    def __init__(self, in_channels, out_channels, stride=2):
        '''simple docstring'''
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, hidden_state) -> Tensor:
        '''simple docstring'''
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ = 1 , __magic_name__ = "relu" ) -> Tuple:
'''simple docstring'''
super().__init__()
snake_case_ : Optional[Any] = in_channels != out_channels or stride != 1
snake_case_ : Tuple = (
ResNetShortCut(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE ) if should_apply_shortcut else nn.Identity()
)
snake_case_ : Optional[int] = nn.Sequential(
ResNetConvLayer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE ) , ResNetConvLayer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , activation=_SCREAMING_SNAKE_CASE ) , )
self.activation = ACT2FN[activation]
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
snake_case_ : Union[str, Any] = hidden_state
snake_case_ : Union[str, Any] = self.layer(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = self.shortcut(_SCREAMING_SNAKE_CASE )
hidden_state += residual
snake_case_ : Dict = self.activation(_SCREAMING_SNAKE_CASE )
return hidden_state
class ResNetBottleNeckLayer(nn.Module):
def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ = 1 , __magic_name__ = "relu" , __magic_name__ = 4 ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : Optional[Any] = in_channels != out_channels or stride != 1
snake_case_ : Tuple = out_channels // reduction
snake_case_ : Tuple = (
ResNetShortCut(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE ) if should_apply_shortcut else nn.Identity()
)
snake_case_ : List[str] = nn.Sequential(
ResNetConvLayer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , kernel_size=1 ) , ResNetConvLayer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE ) , ResNetConvLayer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , kernel_size=1 , activation=_SCREAMING_SNAKE_CASE ) , )
self.activation = ACT2FN[activation]
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
snake_case_ : int = hidden_state
snake_case_ : Optional[Any] = self.layer(_SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = self.shortcut(_SCREAMING_SNAKE_CASE )
hidden_state += residual
snake_case_ : Dict = self.activation(_SCREAMING_SNAKE_CASE )
return hidden_state
class ResNetStage(nn.Module):
def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 2 , __magic_name__ = 2 , ) -> Tuple:
'''simple docstring'''
super().__init__()
layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
snake_case_ : List[Any] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , activation=config.hidden_act ) , *[layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def lowerCamelCase (self , __magic_name__ ) -> Tensor:
'''simple docstring'''
snake_case_ : str = input
for layer in self.layers:
snake_case_ : Optional[Any] = layer(_SCREAMING_SNAKE_CASE )
return hidden_state
class ResNetEncoder(nn.Module):
def __init__(self , __magic_name__ ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : Optional[Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
_SCREAMING_SNAKE_CASE , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
snake_case_ : str = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_SCREAMING_SNAKE_CASE , config.depths[1:] ):
self.stages.append(ResNetStage(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , depth=_SCREAMING_SNAKE_CASE ) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ = False , __magic_name__ = True ) -> BaseModelOutputWithNoAttention:
'''simple docstring'''
snake_case_ : int = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
snake_case_ : Tuple = hidden_states + (hidden_state,)
snake_case_ : Optional[Any] = stage_module(_SCREAMING_SNAKE_CASE )
if output_hidden_states:
snake_case_ : Optional[int] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=_SCREAMING_SNAKE_CASE , hidden_states=_SCREAMING_SNAKE_CASE , )
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    def _init_weights(self, module):
        '''simple docstring'''
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        '''simple docstring'''
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
RESNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        '''simple docstring'''
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
def lowerCamelCase (self , __magic_name__ , __magic_name__ = None , __magic_name__ = None ) -> BaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
snake_case_ : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
snake_case_ : Any = self.embedder(_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = self.encoder(
_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE )
snake_case_ : Any = encoder_outputs[0]
snake_case_ : str = self.pooler(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_SCREAMING_SNAKE_CASE , pooler_output=_SCREAMING_SNAKE_CASE , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
def __init__(self , __magic_name__ ) -> Dict:
'''simple docstring'''
super().__init__(_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = config.num_labels
snake_case_ : Union[str, Any] = ResNetModel(_SCREAMING_SNAKE_CASE )
# classification head
snake_case_ : Any = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
def lowerCamelCase (self , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , ) -> ImageClassifierOutputWithNoAttention:
'''simple docstring'''
snake_case_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
snake_case_ : Any = self.resnet(_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = outputs.pooler_output if return_dict else outputs[1]
snake_case_ : str = self.classifier(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
snake_case_ : Tuple = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
snake_case_ : Tuple = "single_label_classification"
else:
snake_case_ : int = "multi_label_classification"
if self.config.problem_type == "regression":
snake_case_ : str = MSELoss()
if self.num_labels == 1:
snake_case_ : Union[str, Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
snake_case_ : int = loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif self.config.problem_type == "single_label_classification":
snake_case_ : Optional[Any] = CrossEntropyLoss()
snake_case_ : int = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
snake_case_ : Union[str, Any] = BCEWithLogitsLoss()
snake_case_ : Tuple = loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not return_dict:
snake_case_ : List[str] = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_SCREAMING_SNAKE_CASE , logits=_SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states )
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
def __init__(self , __magic_name__ ) -> int:
'''simple docstring'''
super().__init__(_SCREAMING_SNAKE_CASE )
super()._init_backbone(_SCREAMING_SNAKE_CASE )
snake_case_ : str = [config.embedding_size] + config.hidden_sizes
snake_case_ : List[Any] = ResNetEmbeddings(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = ResNetEncoder(_SCREAMING_SNAKE_CASE )
# initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
def lowerCamelCase (self , __magic_name__ , __magic_name__ = None , __magic_name__ = None ) -> BackboneOutput:
'''simple docstring'''
snake_case_ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
snake_case_ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case_ : Union[str, Any] = self.embedder(_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = self.encoder(_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = outputs.hidden_states
snake_case_ : Any = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
snake_case_ : Union[str, Any] = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=_SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=_SCREAMING_SNAKE_CASE , )
| 364 |
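# A hedged smoke-test sketch for the modeling code above, using the public
# transformers API (the config values are illustrative, not the resnet-50
# defaults). The embedder downsamples 4x (strided conv + max pool); stage 1
# keeps the resolution (downsample_in_first_stage defaults to False) and
# stage 2 halves it, so 224 -> 56 -> 56 -> 28.
import torch
from transformers import ResNetConfig, ResNetModel

config = ResNetConfig(embedding_size=32, hidden_sizes=[64, 128], depths=[2, 2])
model = ResNetModel(config)
model.eval()
with torch.no_grad():
    outputs = model(torch.randn(1, 3, 224, 224))
assert outputs.last_hidden_state.shape == (1, 128, 28, 28)
assert outputs.pooler_output.shape == (1, 128, 1, 1)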
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
    "7B": 11_008,
    "13B": 13_824,
    "30B": 17_920,
    "65B": 22_016,
    "70B": 28_672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    """simple docstring"""
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
def read_json(path):
    """simple docstring"""
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    """simple docstring"""
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    """simple docstring"""
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)
    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10_000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )
        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }
    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))
    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)
    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()
    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path
    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    """simple docstring"""
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
| 279 | 0 |
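# Quick arithmetic check for compute_intermediate_size above: the 7B model has
# dim = 4096, and the default multiplier/multiple reproduce the 11008 entry in
# INTERMEDIATE_SIZE_MAP: int(8 * 4096 / 3) = 10922, rounded up to the next
# multiple of 256 gives 43 * 256 = 11008.
assert compute_intermediate_size(4096) == 11_008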
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCAmelCase__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        '''simple docstring'''
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        '''simple docstring'''
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 68 |
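# A standalone sketch of the shortest-edge resize policy used above: scale so
# the shorter side equals `shortest_edge` while preserving aspect ratio. The
# library helper (get_resize_output_image_size) may round slightly differently;
# this is for intuition only.
def shortest_edge_output_size(height: int, width: int, shortest_edge: int) -> tuple:
    scale = shortest_edge / min(height, width)
    return (round(height * scale), round(width * scale))


assert shortest_edge_output_size(480, 640, 224) == (224, 299)  # 640 * 224 / 480 ~= 298.7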
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    """simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        """simple docstring"""
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self ):
"""simple docstring"""
return
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip('Swin does not use inputs_embeds' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip('Swin does not support feedforward chunking' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCAmelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# Swin has a different seq_length
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(UpperCAmelCase ):
_UpperCAmelCase = 0
return t
def check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase={} ):
with torch.no_grad():
_UpperCAmelCase = model(**UpperCAmelCase , return_dict=UpperCAmelCase , **UpperCAmelCase )
_UpperCAmelCase = model(**UpperCAmelCase , return_dict=UpperCAmelCase , **UpperCAmelCase ).to_tuple()
def recursive_check(UpperCAmelCase , UpperCAmelCase ):
if isinstance(UpperCAmelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(UpperCAmelCase , UpperCAmelCase ):
recursive_check(UpperCAmelCase , UpperCAmelCase )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(UpperCAmelCase , UpperCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(UpperCAmelCase ) , set_nan_tensor_to_zero(UpperCAmelCase ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
F""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
F""" {torch.isnan(UpperCAmelCase ).any()} and `inf`: {torch.isinf(UpperCAmelCase )}. Dict has"""
F""" `nan`: {torch.isnan(UpperCAmelCase ).any()} and `inf`: {torch.isinf(UpperCAmelCase )}."""
) , )
recursive_check(UpperCAmelCase , UpperCAmelCase )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , {'output_hidden_states': True} )
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , {'output_hidden_states': True} )
@require_torch
class MaskFormerSwinBackboneTest ( unittest.TestCase , BackboneTesterMixin):
"""simple docstring"""
all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
config_class = MaskFormerSwinConfig
def setUp( self ):
"""simple docstring"""
self.model_tester = MaskFormerSwinModelTester(self )
def test_backbone_outputs( self ):
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
batch_size = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
backbone = backbone_class(config )
backbone.to(torch_device )
backbone.eval()
outputs = backbone(**inputs_dict )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , tuple )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
outputs = backbone(**inputs_dict , output_hidden_states=True )
self.assertIsNotNone(outputs.hidden_states )
self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
h_batch_size , _ , h_n_channels = hidden_state.shape
self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
outputs = backbone(**inputs_dict , output_attentions=True )
self.assertIsNotNone(outputs.attentions )
| 39 | 0 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
@slow
def a ( self : List[str] ):
model = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
tokenizer = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
input_text = '''The dog is cute and lives in the garden house'''
input_ids = jnp.array([tokenizer.encode(input_text )] )
expected_output_shape = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
expected_output_values_last_dim = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
output = model(input_ids )['''last_hidden_state''']
self.assertEqual(output.shape , expected_output_shape )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
| 86 |
"""simple docstring"""
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """Return the sum of an arithmetic series: S_n = n/2 * (2a + (n - 1)d)."""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main():
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
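# Quick sanity checks of the closed form above (illustrative values):
# S_n = n/2 * (2a + (n - 1)d), so 1 + 2 + ... + 10 = 55.
assert sum_of_series(1, 1, 10) == 55.0
assert sum_of_series(first_term=2, common_diff=3, num_of_terms=4) == 26.0  # 2 + 5 + 8 + 11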
| 86 | 1 |
def encrypt(input_string: str, key: int) -> str:
    """Shuffle the characters of `input_string` along a zigzag of `key` rows."""
    temp_grid = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Generate the zigzag template, fill it with the ciphertext and read it back."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict:
    """Use the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
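# Round-trip example with key = 3: "HELLO WORLD" zigzags into the rows
# "HOR" / "EL OL" / "LWD", which read off as "HOREL OLLWD".
ciphertext = encrypt("HELLO WORLD", 3)
assert ciphertext == "HOREL OLLWD"
assert decrypt(ciphertext, 3) == "HELLO WORLD"
assert bruteforce(ciphertext)[3] == "HELLO WORLD"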
| 107 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Tuple = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __snake_case ( UpperCamelCase_ ):
_a = '''xlm-roberta-xl'''
def __init__( self : int , A_ : List[str]=2_5_0_8_8_0 , A_ : List[str]=2_5_6_0 , A_ : Optional[int]=3_6 , A_ : List[Any]=3_2 , A_ : Optional[int]=1_0_2_4_0 , A_ : Dict="gelu" , A_ : int=0.1 , A_ : Optional[Any]=0.1 , A_ : int=5_1_4 , A_ : Any=1 , A_ : Optional[Any]=0.02 , A_ : str=1e-05 , A_ : Dict=1 , A_ : Any=0 , A_ : Tuple=2 , A_ : str="absolute" , A_ : str=True , A_ : List[str]=None , **A_ : Dict , ):
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_)
lowerCAmelCase_ : Tuple = vocab_size
lowerCAmelCase_ : List[str] = hidden_size
lowerCAmelCase_ : int = num_hidden_layers
lowerCAmelCase_ : int = num_attention_heads
lowerCAmelCase_ : Dict = hidden_act
lowerCAmelCase_ : int = intermediate_size
lowerCAmelCase_ : Tuple = hidden_dropout_prob
lowerCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase_ : Union[str, Any] = max_position_embeddings
lowerCAmelCase_ : Dict = type_vocab_size
lowerCAmelCase_ : str = initializer_range
lowerCAmelCase_ : str = layer_norm_eps
lowerCAmelCase_ : Optional[Any] = position_embedding_type
lowerCAmelCase_ : Optional[Any] = use_cache
lowerCAmelCase_ : List[str] = classifier_dropout
class __snake_case ( UpperCamelCase_ ):
@property
def UpperCAmelCase__ ( self : List[str]):
if self.task == "multiple-choice":
lowerCAmelCase_ : Union[str, Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCAmelCase_ : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
])
| 103 | 0 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide and conquer: the max of nums[left:right + 1] is the larger half-maximum."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
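# Illustrative check: the divide-and-conquer result matches the builtin max.
nums = [3, 1, 4, 1, 5, 9, 2, 6]
assert find_max(nums, 0, len(nums) - 1) == max(nums) == 9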
| 193 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Given a *sorted* list, return the indices of two numbers that add up to `target`."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
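# The invariant relies on sorted input: advancing i can only grow the sum and
# retreating j can only shrink it, so no candidate pair is ever skipped.
assert two_pointer([2, 7, 11, 15], 26) == [2, 3]  # 11 + 15
assert two_pointer([1, 2, 3], 7) == []  # no pair sums to 7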
| 193 | 1 |
"""simple docstring"""
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law (V = I * R) to solve for whichever quantity is passed as 0."""
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
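# Illustrative uses: pass 0 for the quantity to solve for.
assert ohms_law(voltage=10, current=0, resistance=5) == {"current": 2.0}
assert ohms_law(voltage=0, current=2, resistance=5) == {"voltage": 10.0}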
| 61 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[Any] = '''focalnet'''
def __init__( self ,__UpperCAmelCase=224 ,__UpperCAmelCase=4 ,__UpperCAmelCase=3 ,__UpperCAmelCase=96 ,__UpperCAmelCase=False ,__UpperCAmelCase=[192, 384, 768, 768] ,__UpperCAmelCase=[2, 2, 6, 2] ,__UpperCAmelCase=[2, 2, 2, 2] ,__UpperCAmelCase=[3, 3, 3, 3] ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=4.0 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=False ,__UpperCAmelCase=1E-4 ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=1E-5 ,__UpperCAmelCase=32 ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,**__UpperCAmelCase ,) -> Optional[Any]:
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : Dict = image_size
lowerCAmelCase__ : int = patch_size
lowerCAmelCase__ : str = num_channels
lowerCAmelCase__ : Dict = embed_dim
lowerCAmelCase__ : List[str] = use_conv_embed
lowerCAmelCase__ : List[Any] = hidden_sizes
lowerCAmelCase__ : Dict = depths
lowerCAmelCase__ : List[str] = focal_levels
lowerCAmelCase__ : List[str] = focal_windows
lowerCAmelCase__ : Dict = hidden_act
lowerCAmelCase__ : Dict = mlp_ratio
lowerCAmelCase__ : Tuple = hidden_dropout_prob
lowerCAmelCase__ : Tuple = drop_path_rate
lowerCAmelCase__ : Dict = use_layerscale
lowerCAmelCase__ : Optional[Any] = layerscale_value
lowerCAmelCase__ : str = use_post_layernorm
lowerCAmelCase__ : Union[str, Any] = use_post_layernorm_in_modulation
lowerCAmelCase__ : int = normalize_modulator
lowerCAmelCase__ : Optional[Any] = initializer_range
lowerCAmelCase__ : List[str] = layer_norm_eps
lowerCAmelCase__ : List[Any] = encoder_stride
lowerCAmelCase__ : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 ,len(self.depths ) + 1 )]
lowerCAmelCase__ , lowerCAmelCase__ : Any = get_aligned_output_features_output_indices(
out_features=__UpperCAmelCase ,out_indices=__UpperCAmelCase ,stage_names=self.stage_names )
| 37 | 0 |
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : str = (DDIMParallelScheduler,)
lowerCAmelCase : List[Any] = (('eta', 0.0), ('num_inference_steps', 5_0))
def __lowercase ( self : Tuple ,**_UpperCAmelCase : Union[str, Any] ):
_a : Union[str, Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**_UpperCAmelCase )
return config
def __lowercase ( self : Dict ,**_UpperCAmelCase : Optional[int] ):
_a : List[str] = self.scheduler_classes[0]
_a : Optional[int] = self.get_scheduler_config(**_UpperCAmelCase )
_a : List[str] = scheduler_class(**_UpperCAmelCase )
_a : Optional[int] = 10, 0.0
_a : Optional[Any] = self.dummy_model()
_a : Dict = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
for t in scheduler.timesteps:
_a : Union[str, Any] = model(_UpperCAmelCase ,_UpperCAmelCase )
_a : int = scheduler.step(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ).prev_sample
return sample
def __lowercase ( self : Dict ):
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_UpperCAmelCase )
_a : List[str] = self.scheduler_classes[0]
_a : Optional[int] = self.get_scheduler_config(steps_offset=1 )
_a : Dict = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps ,torch.LongTensor([801, 601, 401, 201, 1] ) )
def __lowercase ( self : Any ):
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] ,[0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_UpperCAmelCase ,beta_end=_UpperCAmelCase )
def __lowercase ( self : int ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def __lowercase ( self : Tuple ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def __lowercase ( self : Optional[int] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_UpperCAmelCase )
def __lowercase ( self : int ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=_UpperCAmelCase )
def __lowercase ( self : Union[str, Any] ):
self.check_over_configs(thresholding=_UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=_UpperCAmelCase ,prediction_type=_UpperCAmelCase ,sample_max_value=_UpperCAmelCase ,)
def __lowercase ( self : Tuple ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=_UpperCAmelCase )
def __lowercase ( self : Union[str, Any] ):
for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 500] ):
self.check_over_forward(time_step=_UpperCAmelCase ,num_inference_steps=_UpperCAmelCase )
def __lowercase ( self : Union[str, Any] ):
for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=_UpperCAmelCase ,eta=_UpperCAmelCase )
def __lowercase ( self : Union[str, Any] ):
_a : List[str] = self.scheduler_classes[0]
_a : List[str] = self.get_scheduler_config()
_a : Optional[int] = scheduler_class(**_UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 ,400 ) - 0.1_47_71 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 ,960 ) - 0.3_24_60 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ,486 ) - 0.0_09_79 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ,998 ) - 0.02 ) ) < 1E-5
def __lowercase ( self : Tuple ):
_a : Union[str, Any] = self.scheduler_classes[0]
_a : Tuple = self.get_scheduler_config()
_a : Any = scheduler_class(**_UpperCAmelCase )
_a : List[Any] = 10, 0.0
scheduler.set_timesteps(_UpperCAmelCase )
_a : Dict = self.dummy_model()
_a : Any = self.dummy_sample_deter
_a : Any = self.dummy_sample_deter + 0.1
_a : str = self.dummy_sample_deter - 0.1
_a : Dict = samplea.shape[0]
_a : Optional[int] = torch.stack([samplea, samplea, samplea] ,dim=0 )
_a : List[str] = torch.arange(_UpperCAmelCase )[0:3, None].repeat(1 ,_UpperCAmelCase )
_a : List[Any] = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
_a : Optional[Any] = scheduler.batch_step_no_noise(_UpperCAmelCase ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,_UpperCAmelCase )
_a : Optional[Any] = torch.sum(torch.abs(_UpperCAmelCase ) )
_a : Tuple = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 1147.7904 ) < 1E-2
assert abs(result_mean.item() - 0.49_82 ) < 1E-3
def __lowercase ( self : Any ):
_a : Any = self.full_loop()
_a : List[str] = torch.sum(torch.abs(_UpperCAmelCase ) )
_a : Optional[int] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 172.0067 ) < 1E-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1E-3
def __lowercase ( self : str ):
_a : Optional[Any] = self.full_loop(prediction_type='v_prediction' )
_a : List[Any] = torch.sum(torch.abs(_UpperCAmelCase ) )
_a : Union[str, Any] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 52.53_02 ) < 1E-2
assert abs(result_mean.item() - 0.06_84 ) < 1E-3
def __lowercase ( self : Dict ):
# We specify different beta, so that the first alpha is 0.99
_a : Tuple = self.full_loop(set_alpha_to_one=_UpperCAmelCase ,beta_start=0.01 )
_a : int = torch.sum(torch.abs(_UpperCAmelCase ) )
_a : Any = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 149.8295 ) < 1E-2
assert abs(result_mean.item() - 0.19_51 ) < 1E-3
def __lowercase ( self : Tuple ):
# We specify different beta, so that the first alpha is 0.99
_a : int = self.full_loop(set_alpha_to_one=_UpperCAmelCase ,beta_start=0.01 )
_a : Any = torch.sum(torch.abs(_UpperCAmelCase ) )
_a : List[str] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 149.0784 ) < 1E-2
assert abs(result_mean.item() - 0.19_41 ) < 1E-3
| 359 |
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
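# A minimal end-to-end exchange with the class above: each side combines the
# other's public key with its own private key and derives the same digest.
alice = DiffieHellman(group=14)
bob = DiffieHellman(group=14)
assert alice.generate_shared_key(bob.generate_public_key()) == bob.generate_shared_key(
    alice.generate_public_key()
)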
| 107 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester( unittest.TestCase ):
def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=224 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class A__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
image_processing_class = ViTImageProcessor if is_vision_available() else None
def setUp( self ):
'''simple docstring'''
self.image_proc_tester = EfficientFormerImageProcessorTester(self )
@property
def image_processor_dict( self ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def __UpperCamelCase( self ):
'''simple docstring'''
image_processor = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(image_processor , "image_mean" ) )
self.assertTrue(hasattr(image_processor , "image_std" ) )
self.assertTrue(hasattr(image_processor , "do_normalize" ) )
self.assertTrue(hasattr(image_processor , "do_resize" ) )
self.assertTrue(hasattr(image_processor , "size" ) )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
def __UpperCamelCase( self ):
'''simple docstring'''
image_processor = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False )
for image in image_inputs:
self.assertIsInstance(image , Image.Image )
# Test not batched input
encoded_images = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
encoded_images = image_processor(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def __UpperCamelCase( self ):
'''simple docstring'''
image_processor = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , numpify=True )
for image in image_inputs:
self.assertIsInstance(image , np.ndarray )
# Test not batched input
encoded_images = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
encoded_images = image_processor(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def __UpperCamelCase( self ):
'''simple docstring'''
image_processor = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , torchify=True )
for image in image_inputs:
self.assertIsInstance(image , torch.Tensor )
# Test not batched input
encoded_images = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
encoded_images = image_processor(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
| 52 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
_UpperCAmelCase = """scheduler_config.json"""
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = 1
lowerCamelCase_ = 2
lowerCamelCase_ = 3
lowerCamelCase_ = 4
lowerCamelCase_ = 5
lowerCamelCase_ = 6
lowerCamelCase_ = 7
lowerCamelCase_ = 8
lowerCamelCase_ = 9
lowerCamelCase_ = 1_0
lowerCamelCase_ = 1_1
lowerCamelCase_ = 1_2
lowerCamelCase_ = 1_3
lowerCamelCase_ = 1_4
@dataclass
class SchedulerOutput ( BaseOutput ):
'''simple docstring'''
prev_sample: torch.FloatTensor
class SchedulerMixin:
'''simple docstring'''
config_name = SCHEDULER_CONFIG_NAME
_compatibles = []
has_compatibles = True
@classmethod
def from_pretrained( cls , pretrained_model_name_or_path = None , subfolder = None , return_unused_kwargs=False , **kwargs , ):
"""simple docstring"""
config , kwargs , commit_hash = cls.load_config(
pretrained_model_name_or_path=pretrained_model_name_or_path , subfolder=subfolder , return_unused_kwargs=True , return_commit_hash=True , **kwargs , )
return cls.from_config(config , return_unused_kwargs=return_unused_kwargs , **kwargs )
def save_pretrained( self , save_directory , push_to_hub = False , **kwargs ):
"""simple docstring"""
self.save_config(save_directory=save_directory , push_to_hub=push_to_hub , **kwargs )
@property
def compatibles( self ):
"""simple docstring"""
return self._get_compatibles()
@classmethod
def _get_compatibles( cls ):
"""simple docstring"""
compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
# import the top-level package and resolve each compatible class name to a class object
diffusers_library = importlib.import_module(__name__.split('.' )[0] )
compatible_classes = [
getattr(diffusers_library , c ) for c in compatible_classes_str if hasattr(diffusers_library , c )
]
return compatible_classes
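# A hypothetical usage sketch (names assumed, shown for illustration only):
# scheduler = SomeSchedulerSubclass.from_pretrained("org/model", subfolder="scheduler")
# scheduler.save_pretrained("./local-scheduler")
# print(scheduler.compatibles)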
| 140 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester :
def __init__(self , __magic_name__ , ) -> str:
'''simple docstring'''
snake_case_ : Any = parent
snake_case_ : Tuple = 13
snake_case_ : int = 7
snake_case_ : Union[str, Any] = True
snake_case_ : List[str] = True
snake_case_ : Optional[int] = True
snake_case_ : List[str] = True
snake_case_ : Union[str, Any] = True
snake_case_ : Optional[Any] = False
snake_case_ : Dict = False
snake_case_ : Dict = False
snake_case_ : Any = 2
snake_case_ : Dict = 99
snake_case_ : Union[str, Any] = 0
snake_case_ : Tuple = 32
snake_case_ : Optional[Any] = 2
snake_case_ : Dict = 4
snake_case_ : List[Any] = 0.1
snake_case_ : Dict = 0.1
snake_case_ : Any = 512
snake_case_ : Optional[Any] = 16
snake_case_ : str = 2
snake_case_ : Union[str, Any] = 0.02
snake_case_ : Optional[int] = 3
snake_case_ : Optional[int] = 4
snake_case_ : Union[str, Any] = '''last'''
snake_case_ : Any = True
snake_case_ : Dict = None
snake_case_ : Optional[int] = 0
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
snake_case_ : Dict = None
if self.use_input_lengths:
snake_case_ : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
snake_case_ : Optional[int] = None
if self.use_token_type_ids:
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
snake_case_ : List[str] = None
snake_case_ : List[Any] = None
snake_case_ : Optional[int] = None
if self.use_labels:
snake_case_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ : int = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
snake_case_ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ : Optional[int] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = TFFlaubertModel(config=__magic_name__ )
snake_case_ : List[Any] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
snake_case_ : Dict = model(__magic_name__ )
snake_case_ : Dict = [input_ids, input_mask]
snake_case_ : Tuple = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = TFFlaubertWithLMHeadModel(__magic_name__ )
snake_case_ : Optional[int] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
snake_case_ : Dict = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = TFFlaubertForQuestionAnsweringSimple(__magic_name__ )
snake_case_ : Any = {'''input_ids''': input_ids, '''lengths''': input_lengths}
snake_case_ : List[Any] = model(__magic_name__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = TFFlaubertForSequenceClassification(__magic_name__ )
snake_case_ : int = {'''input_ids''': input_ids, '''lengths''': input_lengths}
snake_case_ : Dict = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> List[str]:
'''simple docstring'''
snake_case_ : List[str] = self.num_labels
snake_case_ : Dict = TFFlaubertForTokenClassification(config=__magic_name__ )
snake_case_ : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ : Union[str, Any] = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> int:
'''simple docstring'''
snake_case_ : Any = self.num_choices
snake_case_ : Any = TFFlaubertForMultipleChoice(config=__magic_name__ )
snake_case_ : Tuple = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
snake_case_ : Dict = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
snake_case_ : Optional[int] = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
snake_case_ : Optional[int] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
snake_case_ : Dict = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Any = self.prepare_config_and_inputs()
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) : str = config_and_inputs
snake_case_ : List[Any] = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''langs''': token_type_ids,
'''lengths''': input_lengths,
}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( _a, _a, unittest.TestCase ):
lowerCamelCase_ : int = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase_ : List[str] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowerCamelCase_ : List[str] = (
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase_ : List[str] = False
lowerCamelCase_ : Any = False
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def setUp(self ):
'''simple docstring'''
self.model_tester = TFFlaubertModelTester(self )
self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__magic_name__ )
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__magic_name__ )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__magic_name__ )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__magic_name__ )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*__magic_name__ )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*__magic_name__ )
@slow
def lowerCamelCase (self ) -> int:
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Optional[Any] = TFFlaubertModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' )
snake_case_ : int = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
snake_case_ : List[str] = model(__magic_name__ )[0]
snake_case_ : List[Any] = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , __magic_name__ )
# compare the actual values for a slice.
snake_case_ : Union[str, Any] = tf.convert_to_tensor(
[
[
[-1.8_768_773, -1.566_555, 0.27_072_418],
[-1.6_920_038, -0.5_873_505, 1.9_329_599],
[-2.9_563_985, -1.6_993_835, 1.7_972_052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 279 |
def twos_complement(number: int) -> str:
    """Return the two's complement binary representation of a non-positive integer."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
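# Illustrative values: -5 needs 3 magnitude bits, so its two's complement is 1011.
assert twos_complement(0) == "0b0"
assert twos_complement(-1) == "0b11"
assert twos_complement(-5) == "0b1011"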
| 279 | 1 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
lowerCamelCase__ = """1"""
lowerCamelCase__ = """0"""
lowerCamelCase__ = """1"""
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("""Create inference session...""")
execution_provider = ["""TensorrtExecutionProvider""", """CUDAExecutionProvider"""]
sess = ort.InferenceSession("""model.onnx""", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print("""Warm up phase...""")
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
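# the warm-up run above lets the selected execution provider build and cache its
# engine before any timed iterations, so the measured latency excludes one-time setup;
# providers are tried in the order listed: TensorRT first, then CUDA as the fallback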
print("""Start inference...""")
start_time = time.time()
max_iters = 2_000
lowerCamelCase__ = {}
for iter in range(max_iters):
lowerCamelCase__ = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("""Average Inference Time = {:.3f} ms""".format((time.time() - start_time) * 1_000 / max_iters)) | 86 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = AutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : int = TFAutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = AutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Dict = AutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) | 86 | 1 |
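The block above round-trips checkpoints between TensorFlow and PyTorch through the auto classes. A minimal standalone sketch of the same loading pattern, assuming `transformers` with both frameworks installed (the checkpoint name is just the usual public BERT weights):

```python
# Sketch: cross-framework checkpoint loading with the auto classes.
# Assumes transformers with both PyTorch and TensorFlow installed.
from transformers import AutoModelForMaskedLM, TFAutoModelForMaskedLM

name = "bert-base-uncased"

# Build a TF model directly from the PyTorch weights on the Hub.
tf_model = TFAutoModelForMaskedLM.from_pretrained(name, from_pt=True)

# output_loading_info=True additionally returns a dict describing
# missing/unexpected keys, as asserted in the tests above.
pt_model, loading_info = AutoModelForMaskedLM.from_pretrained(
    name, output_loading_info=True
)
print(sorted(loading_info))
```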
"""simple docstring"""
def solution():
    """simple docstring"""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
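The result of the calendar walk above can be cross-checked against the standard library, which already encodes the Gregorian leap-year rules:

```python
# Cross-check: count month-firsts that are Sundays between
# 1901-01-01 and 2000-12-31 (weekday() == 6 means Sunday).
import datetime

count = sum(
    datetime.date(year, month, 1).weekday() == 6
    for year in range(1901, 2001)
    for month in range(1, 13)
)
print(count)  # 171
```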
| 85 | """simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """simple docstring"""
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """simple docstring"""
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-1k-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ) , '''r''' ) )
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load('''facebookresearch/dino:main''' , model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
__lowercase = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 85 | 1 |
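The `read_in_q_k_v` step is the heart of the conversion: timm/DINO stores attention as one fused `qkv` matrix of shape `(3 * hidden, hidden)`, while the HF checkpoint keeps three separate projections. A toy illustration of the slicing:

```python
import torch

hidden = 4
# Fused qkv projection as stored by timm: rows are stacked [q; k; v].
in_proj_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)

q_w = in_proj_weight[:hidden, :]
k_w = in_proj_weight[hidden : 2 * hidden, :]
v_w = in_proj_weight[-hidden:, :]

# Equivalent in one call:
q2, k2, v2 = torch.chunk(in_proj_weight, 3, dim=0)
assert torch.equal(q_w, q2) and torch.equal(k_w, k2) and torch.equal(v_w, v2)
```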
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowercase__ : Tuple = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
lowercase__ : Optional[Any] = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
snake_case_ = get_test_to_tester_mapping(__lowerCamelCase )
snake_case_ = get_test_to_tester_mapping(__lowerCamelCase )
snake_case_ = {"BertModelTest": "BertModelTester"}
snake_case_ = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
self.assertEqual(get_test_info.to_json(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(get_test_info.to_json(__lowerCamelCase ) , __lowerCamelCase )
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
snake_case_ = get_model_to_test_mapping(__lowerCamelCase )
snake_case_ = get_model_to_test_mapping(__lowerCamelCase )
snake_case_ = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
snake_case_ = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
self.assertEqual(get_test_info.to_json(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(get_test_info.to_json(__lowerCamelCase ) , __lowerCamelCase )
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = get_model_to_tester_mapping(__lowerCamelCase )
snake_case_ = get_model_to_tester_mapping(__lowerCamelCase )
snake_case_ = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
snake_case_ = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
self.assertEqual(get_test_info.to_json(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(get_test_info.to_json(__lowerCamelCase ) , __lowerCamelCase )
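The mappings asserted above pair each `*ModelTest` class with the `*ModelTester` it instantiates. A rough `ast`-based sketch of how such pairs can be recovered from a test file (an illustrative heuristic, not the actual `get_test_info` implementation):

```python
import ast

def guess_test_to_tester(path):
    """Map each *ModelTest class to the tester class names called in its body."""
    tree = ast.parse(open(path, encoding="utf-8").read())
    mapping = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.ClassDef) and node.name.endswith("ModelTest"):
            testers = {
                sub.func.id
                for sub in ast.walk(node)
                if isinstance(sub, ast.Call)
                and isinstance(sub.func, ast.Name)
                and sub.func.id.endswith("ModelTester")
            }
            mapping[node.name] = sorted(testers)
    return mapping
```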
| 187 | from math import sqrt
def sum_of_divisors( n ) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1) ):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n
def solution( limit = 10000 ) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 157 | 0 |
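The predicate in `solution` is the classic amicable-number condition from Project Euler 21; the smallest pair makes it concrete:

```python
# d(220) = 1+2+4+5+10+11+20+22+44+55+110 = 284 and d(284) = 220,
# so both satisfy d(d(n)) == n with d(n) != n.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220
print(solution(10000))  # 31626, the sum of all amicable numbers below 10000
```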
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1_0_2_4,
'facebook/bart-large': 1_0_2_4,
'facebook/bart-large-mnli': 1_0_2_4,
'facebook/bart-large-cnn': 1_0_2_4,
'facebook/bart-large-xsum': 1_0_2_4,
'yjernite/bart_eli5': 1_0_2_4,
}
class _lowercase ( __UpperCAmelCase ):
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ['input_ids', 'attention_mask']
lowercase_ = BartTokenizer
def __init__( self , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_="replace" , UpperCAmelCase_="<s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="<s>" , UpperCAmelCase_="<unk>" , UpperCAmelCase_="<pad>" , UpperCAmelCase_="<mask>" , UpperCAmelCase_=False , UpperCAmelCase_=True , **UpperCAmelCase_ , ) -> Union[str, Any]:
super().__init__(
UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCamelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , UpperCAmelCase_ ) != add_prefix_space:
lowerCamelCase : Tuple = getattr(UpperCAmelCase_ , pre_tok_state.pop('type' ) )
lowerCamelCase : Optional[Any] = add_prefix_space
lowerCamelCase : str = pre_tok_class(**UpperCAmelCase_ )
lowerCamelCase : Optional[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowerCamelCase : Dict = 'post_processor'
lowerCamelCase : str = getattr(self.backend_tokenizer , UpperCAmelCase_ , UpperCAmelCase_ )
if tokenizer_component_instance:
lowerCamelCase : Optional[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCamelCase : int = tuple(state['sep'] )
if "cls" in state:
lowerCamelCase : str = tuple(state['cls'] )
lowerCamelCase : Optional[Any] = False
if state.get('add_prefix_space' , UpperCAmelCase_ ) != add_prefix_space:
lowerCamelCase : Dict = add_prefix_space
lowerCamelCase : Tuple = True
if state.get('trim_offsets' , UpperCAmelCase_ ) != trim_offsets:
lowerCamelCase : Tuple = trim_offsets
lowerCamelCase : Dict = True
if changes_to_apply:
lowerCamelCase : Optional[int] = getattr(UpperCAmelCase_ , state.pop('type' ) )
lowerCamelCase : Any = component_class(**UpperCAmelCase_ )
setattr(self.backend_tokenizer , UpperCAmelCase_ , UpperCAmelCase_ )
@property
def mask_token( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def mask_token( self , UpperCAmelCase_ ) -> List[Any]:
lowerCamelCase : Optional[int] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else value
lowerCamelCase : int = value
def _UpperCamelCase ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ) -> BatchEncoding:
lowerCamelCase : str = kwargs.get('is_split_into_words' , UpperCAmelCase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def _UpperCamelCase ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ) -> BatchEncoding:
lowerCamelCase : Optional[Any] = kwargs.get('is_split_into_words' , UpperCAmelCase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ) -> Tuple[str]:
lowerCamelCase : Any = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_=None ) -> List[Any]:
lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ) -> List[int]:
lowerCamelCase : List[Any] = [self.sep_token_id]
lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 352 |
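The two methods at the bottom produce BART's special-token layout. A small sketch of the resulting id patterns, assuming network access to fetch the `facebook/bart-base` vocabulary (`<s>` is id 0, `</s>` is id 2):

```python
from transformers import BartTokenizerFast

tok = BartTokenizerFast.from_pretrained("facebook/bart-base")

single = tok.build_inputs_with_special_tokens([100, 200])
pair = tok.build_inputs_with_special_tokens([100, 200], [300])
# Single sequence:  <s> A </s>
# Pair:             <s> A </s> </s> B </s>
print(single)  # [0, 100, 200, 2]
print(pair)    # [0, 100, 200, 2, 2, 300, 2]
```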
"""simple docstring"""
import numpy as np
def power_iteration( input_matrix, vector, error_tol = 1e-12, max_iterations = 100, ):
    '''simple docstring'''
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
def test_power_iteration():
    '''simple docstring'''
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 205 | 0 |
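Usage of `power_iteration` on a small symmetric matrix, compared against numpy's dense solver:

```python
import numpy as np

A = np.array([[2.0, 1.0], [1.0, 3.0]])
v0 = np.array([1.0, 0.0])

eig, vec = power_iteration(A, v0)
print(eig)                        # ~3.618..., the dominant eigenvalue
print(np.linalg.eigvalsh(A)[-1])  # same value from the dense solver
```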
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
A: List[str] = False
@skip_mps
class SCREAMING_SNAKE_CASE__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : List[Any] = TEXT_TO_IMAGE_PARAMS
__lowerCAmelCase : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} )
__lowerCAmelCase : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
__lowerCAmelCase : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def SCREAMING_SNAKE_CASE ( cls ) -> str:
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(_A )
@classmethod
def SCREAMING_SNAKE_CASE ( cls ) -> Union[str, Any]:
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(_A )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : Tuple = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
UpperCAmelCase : Optional[int] = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
UpperCAmelCase : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
UpperCAmelCase : Union[str, Any] = CLIPTextModel(_A )
UpperCAmelCase : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCAmelCase : Any = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ) -> int:
'''simple docstring'''
if str(_A ).startswith("""mps""" ):
UpperCAmelCase : Dict = torch.manual_seed(_A )
else:
UpperCAmelCase : Tuple = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase : List[str] = {
'''prompt''': '''a cat and a frog''',
'''token_indices''': [2, 5],
'''generator''': generator,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''max_iter_to_alter''': 2,
'''thresholds''': {0: 0.7},
}
return inputs
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] = '''cpu'''
UpperCAmelCase : Optional[Any] = self.get_dummy_components()
UpperCAmelCase : Optional[int] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase : Tuple = self.get_dummy_inputs(_A )
UpperCAmelCase : Any = pipe(**_A ).images
UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
UpperCAmelCase : int = np.array(
[0.6390_5364, 0.6289_7307, 0.4859_9017, 0.513_3624, 0.555_0048, 0.4576_9516, 0.5032_6973, 0.502_3139, 0.4538_4496] )
UpperCAmelCase : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1E-3 )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
super().test_save_load_local(expected_max_difference=5E-4 )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@classmethod
def SCREAMING_SNAKE_CASE ( cls ) -> Optional[Any]:
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(_A )
@classmethod
def SCREAMING_SNAKE_CASE ( cls ) -> Tuple:
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(_A )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Tuple = torch.manual_seed(51 )
UpperCAmelCase : Tuple = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=_A , torch_dtype=torch.float16 )
pipe.to("""cuda""" )
UpperCAmelCase : List[Any] = '''a painting of an elephant with glasses'''
UpperCAmelCase : List[str] = [5, 7]
UpperCAmelCase : List[str] = pipe(
prompt=_A , token_indices=_A , guidance_scale=7.5 , generator=_A , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
UpperCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5E-1
| 109 |
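The class-level `setUpClass`/`tearDownClass` hooks above flip torch into deterministic mode so the slow pipeline outputs are reproducible. The same pattern in isolation (a sketch; the CUBLAS env var is only needed for some CUDA ops):

```python
import os
import torch

# Some CUDA ops require this before deterministic algorithms can be enabled.
os.environ.setdefault("CUBLAS_WORKSPACE_CONFIG", ":4096:8")
torch.use_deterministic_algorithms(True)
try:
    g = torch.Generator(device="cpu").manual_seed(0)
    x = torch.randn(2, 2, generator=g)
    print(x)  # identical on every run
finally:
    torch.use_deterministic_algorithms(False)
```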
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
    return_name = '''generated'''
def __init__( self : str , *_A : int , **_A : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def __a ( self : int , _A : Union[str, Any]=None , _A : Optional[Any]=None , _A : Dict=None , _A : Dict=None , _A : Union[str, Any]=None , _A : int=None , **_A : Optional[int] , ) -> List[Any]:
"""simple docstring"""
lowercase : str = {}
if truncation is not None:
lowercase : Tuple = truncation
lowercase : Tuple = generate_kwargs
lowercase : Optional[Any] = {}
if return_tensors is not None and return_type is None:
lowercase : int = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowercase : Dict = return_type
if clean_up_tokenization_spaces is not None:
lowercase : Dict = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase : Dict = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
lowercase : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __a ( self : str , _A : int , _A : int , _A : int ) -> List[Any]:
"""simple docstring"""
return True
def __a ( self : Union[str, Any] , *_A : Union[str, Any] , _A : List[Any] ) -> Dict:
"""simple docstring"""
lowercase : Tuple = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0] , _A ):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
lowercase : List[Any] = ([prefix + arg for arg in args[0]],)
lowercase : Dict = True
elif isinstance(args[0] , _A ):
lowercase : Optional[int] = (prefix + args[0],)
lowercase : Union[str, Any] = False
else:
    raise ValueError(
        f""" `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or of type `list`.""" )
lowercase : Any = self.tokenizer(*_A , padding=_A , truncation=_A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Union[str, Any] , *_A : Optional[int] , **_A : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = super().__call__(*_A , **_A )
if (
isinstance(args[0] , _A )
and all(isinstance(_A , _A ) for el in args[0] )
and all(len(_A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def __a ( self : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any]=TruncationStrategy.DO_NOT_TRUNCATE , **_A : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = self._parse_and_tokenize(_A , truncation=_A , **_A )
return inputs
def __a ( self : int , _A : Optional[Any] , **_A : Any ) -> Any:
"""simple docstring"""
if self.framework == "pt":
lowercase , lowercase : List[Any] = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
lowercase , lowercase : Optional[Any] = tf.shape(model_inputs['''input_ids'''] ).numpy()
lowercase : int = generate_kwargs.get('''min_length''' , self.model.config.min_length )
lowercase : Optional[int] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
self.check_inputs(_A , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
lowercase : int = self.model.generate(**_A , **_A )
lowercase : int = output_ids.shape[0]
if self.framework == "pt":
lowercase : Optional[Any] = output_ids.reshape(_A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
lowercase : Tuple = tf.reshape(_A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def __a ( self : Union[str, Any] , _A : str , _A : Optional[int]=ReturnType.TEXT , _A : Optional[int]=False ) -> Tuple:
"""simple docstring"""
lowercase : Any = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowercase : Union[str, Any] = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
lowercase : Dict = {
f"""{self.return_name}_text""": self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
}
records.append(_A )
return records
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
    return_name = '''summary'''
def __call__( self : List[Any] , *_A : List[str] , **_A : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return super().__call__(*_A , **_A )
def __a ( self : Any , _A : int , _A : int , _A : int ) -> bool:
"""simple docstring"""
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be smaller than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(_lowerCamelCase )
class _A ( _lowerCamelCase ):
    return_name = '''translation'''
def __a ( self : Union[str, Any] , _A : int , _A : int , _A : int ) -> List[Any]:
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
def __a ( self : Optional[Any] , *_A : Optional[Any] , _A : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , _A : List[Any]=None , _A : Any=None ) -> Dict:
"""simple docstring"""
if getattr(self.tokenizer , '''_build_translation_inputs''' , _A ):
return self.tokenizer._build_translation_inputs(
*_A , return_tensors=self.framework , truncation=_A , src_lang=_A , tgt_lang=_A )
else:
return super()._parse_and_tokenize(*_A , truncation=_A )
def __a ( self : Any , _A : Tuple=None , _A : Any=None , **_A : Any ) -> Optional[int]:
"""simple docstring"""
lowercase , lowercase , lowercase : Dict = super()._sanitize_parameters(**_A )
if src_lang is not None:
lowercase : Optional[Any] = src_lang
if tgt_lang is not None:
lowercase : Dict = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
lowercase : Dict = kwargs.get('''task''' , self.task )
lowercase : List[str] = task.split('''_''' )
if task and len(_A ) == 4:
# translation, XX, to YY
lowercase : Any = items[1]
lowercase : List[str] = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : Tuple , *_A : Union[str, Any] , **_A : List[Any] ) -> List[Any]:
"""simple docstring"""
return super().__call__(*_A , **_A ) | 308 | 0 |
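`_sanitize_parameters` in the translation pipeline recovers the language pair from a legacy task string of the form `translation_XX_to_YY`; the split it relies on is simply:

```python
task = "translation_en_to_de"
items = task.split("_")  # ['translation', 'en', 'to', 'de']
src_lang = tgt_lang = None
if len(items) == 4 and items[0] == "translation" and items[2] == "to":
    src_lang, tgt_lang = items[1], items[3]
print(src_lang, tgt_lang)  # en de
```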
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
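`_LazyModule` defers the heavy framework imports until an attribute is first accessed. A minimal sketch of the same idea with a PEP 562 module-level `__getattr__` (illustrative, not transformers' actual implementation):

```python
# lazy_pkg/__init__.py -- illustrative lazy loader
import importlib

_import_structure = {"configuration_vit_msn": ["ViTMSNConfig"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```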
| 258 | from __future__ import annotations
from math import pi
def ind_reactance( inductance: float , frequency: float , reactance: float ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 258 | 1 |
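Each branch above inverts X_L = 2·π·f·L for the one unknown; a quick numeric round trip:

```python
from math import isclose, pi

# X_L = 2*pi*f*L: 60 Hz through 10 mH gives about 3.77 ohms.
out = ind_reactance(inductance=10e-3, frequency=60, reactance=0)
assert isclose(out["reactance"], 2 * pi * 60 * 10e-3)

# Solving back for L from that reactance recovers 10 mH.
back = ind_reactance(inductance=0, frequency=60, reactance=out["reactance"])
assert isclose(back["inductance"], 10e-3)
```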
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False
def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("""The total no. of solutions are :""", len(solution))
| 92 |
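For n = 8 the backtracking search should report 92 placements. A compact cross-check that encodes each candidate as a permutation of column indices (one queen per row and column by construction) and filters diagonal clashes:

```python
from itertools import permutations

n = 8
count = sum(
    1
    for cols in permutations(range(n))
    if len({r + c for r, c in enumerate(cols)}) == n   # "/" diagonals distinct
    and len({r - c for r, c in enumerate(cols)}) == n  # "\" diagonals distinct
)
print(count)  # 92
```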
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class a__ :
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ):
"""simple docstring"""
__lowerCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A )
__lowerCAmelCase = TFVisionTextDualEncoderModel(_A )
__lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A )
__lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
__lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A )
__lowerCAmelCase = {"vision_model": vision_model, "text_model": text_model}
__lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_A )
__lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A )
__lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
__lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
__lowerCAmelCase = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A )
__lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_A )
__lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
__lowerCAmelCase = after_output[0].numpy()
__lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1E-5 )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A )
__lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
__lowerCAmelCase = model(
input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A )
__lowerCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(_A ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCAmelCase = to_atuple(vision_model.config.image_size )
__lowerCAmelCase = to_atuple(vision_model.config.patch_size )
__lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowerCAmelCase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowerCAmelCase = output.text_model_output.attentions
self.assertEqual(len(_A ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = np.abs((a - b) ).max()
self.assertLessEqual(_A , _A , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
self.check_save_load(**_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.get_pretrained_model_and_inputs()
__lowerCAmelCase = model_a(**_A )
__lowerCAmelCase = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_A )
__lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_A )
__lowerCAmelCase = model_a(**_A )
__lowerCAmelCase = after_outputs[0].numpy()
__lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1E-5 )
@require_tf
class a__ ( snake_case__ , unittest.TestCase ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert" )
__lowerCAmelCase = 1_3
__lowerCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowerCAmelCase = random_attention_mask([batch_size, 4] )
__lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFViTModel(_A , name="vision_model" )
__lowerCAmelCase = TFBertModel(_A , name="text_model" )
return vision_model, text_model
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
        vit_model_tester = TFViTModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class a__ ( snake_case__ , unittest.TestCase ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" )
__lowerCAmelCase = 1_3
__lowerCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowerCAmelCase = random_attention_mask([batch_size, 4] )
__lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A )
__lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
__lowerCAmelCase = model(
input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A )
__lowerCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(_A ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__lowerCAmelCase = to_atuple(vision_model.config.image_size )
__lowerCAmelCase = to_atuple(vision_model.config.patch_size )
__lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowerCAmelCase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowerCAmelCase = output.text_model_output.attentions
self.assertEqual(len(_A ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFDeiTModel(_A , name="vision_model" )
__lowerCAmelCase = TFRobertaModel(_A , name="text_model" )
return vision_model, text_model
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
        vit_model_tester = TFDeiTModelTester(self )
        bert_model_tester = TFRobertaModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class a__ ( snake_case__ , unittest.TestCase ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" )
__lowerCAmelCase = 1_3
__lowerCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowerCAmelCase = random_attention_mask([batch_size, 4] )
__lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFCLIPVisionModel(_A , name="vision_model" )
__lowerCAmelCase = TFBertModel(_A , name="text_model" )
return vision_model, text_model
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
        clip_model_tester = TFCLIPVisionModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class a__ ( unittest.TestCase ):
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(
"clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=_A )
__lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
__lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__lowerCAmelCase = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=_A , padding=_A , return_tensors="np" )
__lowerCAmelCase = model(**_A )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowerCAmelCase = np.array([[1.2_28_47_27, 0.3_10_41_22]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _A , atol=1E-3 ) )
| 92 | 1 |
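Several assertions above derive the attention sequence length from the image and patch sizes; the arithmetic for ViT-style encoders (DeiT adds a distillation token on top of [CLS]):

```python
image_size, patch_size = (224, 224), (16, 16)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_len_vit = num_patches + 1   # + [CLS]
seq_len_deit = num_patches + 2  # + [CLS] and distillation token
print(num_patches, seq_len_vit, seq_len_deit)  # 196 197 198
```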
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """simple docstring"""
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch """
            """helper utility that will spawn up """
            """multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""" , type=int , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
    # positional
    parser.add_argument(
        """training_script""" , type=str , help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ) , )
    # rest from the training program
    parser.add_argument("""training_script_args""" , nargs=REMAINDER )
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the spawned processes see the training script's own arguments.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
| 355 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
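# Note: fixtures such as mockfs, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file,
# zip_jsonl_path, jsonl_gz_path, hf_api and hf_token used below are assumed to come from
# the test suite's conftest.py; they are not defined in this file.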
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    protocol = "bz2"
    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)
    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 82 | 0 |
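# Brute-force string matching: try every alignment of the pattern against the text and
# record the starting indices that match; O(len(s) * len(pattern)) comparisons worst case.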
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search('ABCDEFG', 'DE') == [3]
    print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 48 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
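# Import guard used across diffusers pipelines: when torch/transformers (>= 4.25.0) are
# unavailable, dummy placeholder classes are exposed so the error surfaces only on use.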
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 279 | 0 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
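# Script flow: load the original EfficientFormer checkpoint, remap its keys to the
# Hugging Face module layout, verify logits on a COCO sample image, then save and
# optionally push the converted model.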
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split('.')
        if layer == "0":
            new_name = old_name.replace('0', 'convolution1')
        elif layer == "1":
            new_name = old_name.replace('1', 'batchnorm_before')
        elif layer == "3":
            new_name = old_name.replace('3', 'convolution2')
        else:
            new_name = old_name.replace('4', 'batchnorm_after')

    if "network" in old_name and re.search(r'\d\.\d', old_name):
        two_digit_num = r'\b\d{2}\b'
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r'\d\.\d\d.', old_name).group()
        else:
            match = re.search(r'\d\.\d.', old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, '')
            trimmed_name = trimmed_name.replace('network', match[0] + '.meta4D_layers.blocks.' + match[2:-1])
            new_name = 'intermediate_stages.' + trimmed_name
        else:
            trimmed_name = old_name.replace(match, '')
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace('network', 'meta4D_layers.blocks.' + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace('network', 'meta3D_layers.blocks.' + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace('norm1', 'layernorm1')
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace('norm2', 'layernorm2')
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace('fc1', 'linear_in')
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace('fc2', 'linear_out')
            new_name = 'last_stage.' + trimmed_name
    elif "network" in old_name and re.search(r'.\d.', old_name):
        new_name = old_name.replace('network', 'intermediate_stages')

    if "fc" in new_name:
        new_name = new_name.replace('fc', 'convolution')
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace('norm1', 'batchnorm_before')
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace('norm2', 'batchnorm_after')
    if "proj" in new_name:
        new_name = new_name.replace('proj', 'projection')
    if "dist_head" in new_name:
        new_name = new_name.replace('dist_head', 'distillation_classifier')
    elif "head" in new_name:
        new_name = new_name.replace('head', 'classifier')
    elif "patch_embed" in new_name:
        new_name = 'efficientformer.' + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace('norm', 'layernorm')
        new_name = 'efficientformer.' + new_name
    else:
        new_name = 'efficientformer.encoder.' + new_name
    return new_name
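# Remap every key of the original state dict through rename_key above.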
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint


def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
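# Full conversion: the HF image-processor output is asserted to match the original
# torchvision preprocessing before logits are checked against per-variant reference values.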
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = '_'.join(checkpoint_path.split('/')[-1].split('.')[0].split('_')[:-1])
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
    model.load_state_dict(new_state_dict)
    model.eval()
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={'shortest_edge': image_size},
        crop_size={'height': crop_size, 'width': crop_size},
        resample=pillow_resamplings['bicubic'],
    )
    pixel_values = processor(images=image, return_tensors='pt').pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings['bicubic']),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    assert torch.allclose(original_pixel_values, pixel_values)
    outputs = model(pixel_values)
    logits = outputs.logits
    expected_shape = (1, 1000)
    if 'l1' in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif 'l3' in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif 'l7' in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f'Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7'
        )
    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f'Checkpoint successfully converted. Model saved at {pytorch_dump_path}')
    processor.save_pretrained(pytorch_dump_path)
    print(f'Processor successfully saved at {pytorch_dump_path}')
    if push_to_hub:
        print('Pushing model to the hub...')
        model.push_to_hub(
            repo_id=f'Bearnardd/{pytorch_dump_path}', commit_message='Add model', use_temp_dir=True
        )
        processor.push_to_hub(
            repo_id=f'Bearnardd/{pytorch_dump_path}', commit_message='Add image processor', use_temp_dir=True
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--pytorch_model_path',
        default=None,
        type=str,
        required=True,
        help='Path to EfficientFormer pytorch checkpoint.',
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The json file for EfficientFormer model config.',
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    parser.add_argument(
        '--no-push_to_hub',
        dest='push_to_hub',
        action='store_false',
        help='Do not push model and image processor to the hub',
    )
    parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
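# Illustrative invocation (the script and file names are examples only):
#   python convert_efficientformer_checkpoint.py --pytorch_model_path efficientformer_l1.pth \
#       --config_file l1_config.json --pytorch_dump_path converted-l1 --no-push_to_hub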
| 45 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
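# Regression check: embed a fixed English sentence with xlm-roberta-base under Flax and
# compare the last feature of each hidden state against precomputed reference values.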
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 45 | 1 |