code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = get_activation("swish" )
self.assertIsInstance(snake_case__ , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = get_activation("silu" )
self.assertIsInstance(snake_case__ , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = get_activation("mish" )
self.assertIsInstance(snake_case__ , nn.Mish )
self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = get_activation("gelu" )
self.assertIsInstance(snake_case__ , nn.GELU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
| 108 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : str ="llama"
a : List[str] =["past_key_values"]
def __init__( self , snake_case__=32_000 , snake_case__=4_096 , snake_case__=11_008 , snake_case__=32 , snake_case__=32 , snake_case__=None , snake_case__="silu" , snake_case__=2_048 , snake_case__=0.02 , snake_case__=1e-6 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=1 , snake_case__=False , snake_case__=None , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = vocab_size
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : str = hidden_size
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : Any = num_hidden_layers
lowerCAmelCase : List[str] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
lowerCAmelCase : Tuple = num_attention_heads
lowerCAmelCase : Dict = num_key_value_heads
lowerCAmelCase : Optional[Any] = hidden_act
lowerCAmelCase : Optional[Any] = initializer_range
lowerCAmelCase : Any = rms_norm_eps
lowerCAmelCase : List[Any] = pretraining_tp
lowerCAmelCase : int = use_cache
lowerCAmelCase : List[str] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ , )
def lowercase__ ( self ):
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , snake_case__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
f"""got {self.rope_scaling}""" )
lowerCAmelCase : Optional[Any] = self.rope_scaling.get("type" , snake_case__ )
lowerCAmelCase : int = self.rope_scaling.get("factor" , snake_case__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(snake_case__ , snake_case__ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 108 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__a = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCAmelCase_ ( lowerCamelCase__ ):
"""simple docstring"""
lowercase = ["""pixel_values"""]
def __init__( self : Optional[int] , snake_case_ : List[str] = True , snake_case_ : int = None , snake_case_ : Union[str, Any] = PILImageResampling.BICUBIC , snake_case_ : int = True , snake_case_ : Union[str, Any] = None , snake_case_ : str = True , snake_case_ : List[str] = 1 / 255 , snake_case_ : List[str] = True , snake_case_ : Tuple = None , snake_case_ : int = None , snake_case_ : List[Any] = True , **snake_case_ : str , ):
super().__init__(**snake_case__ )
snake_case__ : str = size if size is not None else {'''shortest_edge''': 224}
snake_case__ : Tuple = get_size_dict(snake_case__ , default_to_square=snake_case__ )
snake_case__ : Tuple = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
snake_case__ : List[Any] = get_size_dict(snake_case__ , default_to_square=snake_case__ , param_name="""crop_size""" )
snake_case__ : Tuple = do_resize
snake_case__ : List[Any] = size
snake_case__ : List[Any] = resample
snake_case__ : Dict = do_center_crop
snake_case__ : str = crop_size
snake_case__ : Any = do_rescale
snake_case__ : Optional[int] = rescale_factor
snake_case__ : List[Any] = do_normalize
snake_case__ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
snake_case__ : List[str] = image_std if image_std is not None else OPENAI_CLIP_STD
snake_case__ : int = do_convert_rgb
def lowerCamelCase ( self : List[Any] , snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Dict = PILImageResampling.BICUBIC , snake_case_ : Union[str, Any] = None , **snake_case_ : Tuple , ):
snake_case__ : Union[str, Any] = get_size_dict(snake_case__ , default_to_square=snake_case__ )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
snake_case__ : List[Any] = get_resize_output_image_size(snake_case__ , size=size["""shortest_edge"""] , default_to_square=snake_case__ )
return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
def lowerCamelCase ( self : Optional[int] , snake_case_ : str , snake_case_ : Tuple , snake_case_ : Optional[Any] = None , **snake_case_ : Union[str, Any] , ):
snake_case__ : Tuple = get_size_dict(snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(snake_case__ , size=(size["""height"""], size["""width"""]) , data_format=snake_case__ , **snake_case__ )
def lowerCamelCase ( self : int , snake_case_ : Optional[Any] , snake_case_ : List[Any] , snake_case_ : Dict = None , **snake_case_ : int , ):
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def lowerCamelCase ( self : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Any = None , **snake_case_ : Optional[Any] , ):
return normalize(snake_case__ , mean=snake_case__ , std=snake_case__ , data_format=snake_case__ , **snake_case__ )
def lowerCamelCase ( self : Dict , snake_case_ : Optional[int] , snake_case_ : Optional[int] = None , snake_case_ : Any = None , snake_case_ : str = None , snake_case_ : Tuple = None , snake_case_ : List[str] = None , snake_case_ : List[Any] = None , snake_case_ : Tuple = None , snake_case_ : List[Any] = None , snake_case_ : int = None , snake_case_ : Dict = None , snake_case_ : List[str] = None , snake_case_ : Union[str, Any] = None , snake_case_ : str = ChannelDimension.FIRST , **snake_case_ : List[str] , ):
snake_case__ : str = do_resize if do_resize is not None else self.do_resize
snake_case__ : List[str] = size if size is not None else self.size
snake_case__ : Tuple = get_size_dict(snake_case__ , param_name="""size""" , default_to_square=snake_case__ )
snake_case__ : List[Any] = resample if resample is not None else self.resample
snake_case__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case__ : int = crop_size if crop_size is not None else self.crop_size
snake_case__ : Optional[Any] = get_size_dict(snake_case__ , param_name="""crop_size""" , default_to_square=snake_case__ )
snake_case__ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
snake_case__ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case__ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
snake_case__ : Union[str, Any] = image_std if image_std is not None else self.image_std
snake_case__ : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case__ : Optional[Any] = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case__ : Optional[int] = [convert_to_rgb(snake_case__ ) for image in images]
# All transformations expect numpy arrays.
snake_case__ : List[Any] = [to_numpy_array(snake_case__ ) for image in images]
if do_resize:
snake_case__ : Optional[int] = [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images]
if do_center_crop:
snake_case__ : Optional[Any] = [self.center_crop(image=snake_case__ , size=snake_case__ ) for image in images]
if do_rescale:
snake_case__ : List[Any] = [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
if do_normalize:
snake_case__ : str = [self.normalize(image=snake_case__ , mean=snake_case__ , std=snake_case__ ) for image in images]
snake_case__ : Dict = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
snake_case__ : Union[str, Any] = {'''pixel_values''': images}
return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
| 361 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__a = logging.getLogger(__name__)
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
def __init__( self : Union[str, Any] , snake_case_ : Any , snake_case_ : int , snake_case_ : str , snake_case_ : Tuple=None ):
super().__init__(
snake_case_ , question_encoder_tokenizer=snake_case_ , generator_tokenizer=snake_case_ , index=snake_case_ , init_retrieval=snake_case_ , )
snake_case__ : int = None
def lowerCamelCase ( self : int , snake_case_ : int ):
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
snake_case__ : Optional[Any] = self._infer_socket_ifname()
# avoid clash with the NCCL port
snake_case__ : int = str(distributed_port + 1 )
snake_case__ : List[str] = dist.new_group(ranks=snake_case_ , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def lowerCamelCase ( self : Optional[Any] ):
return dist.get_rank(group=self.process_group ) == 0
def lowerCamelCase ( self : int , snake_case_ : str , snake_case_ : int , snake_case_ : int=torch.floataa ):
snake_case__ : str = torch.empty(snake_case_ , dtype=snake_case_ )
dist.scatter(snake_case_ , src=0 , scatter_list=snake_case_ , group=self.process_group )
return target_tensor
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : Dict = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
snake_case__ : Dict = next((addr for addr in addrs if addr.startswith("""e""" )) , snake_case_ )
return ifname
def lowerCamelCase ( self : Tuple , snake_case_ : np.ndarray , snake_case_ : int ):
# single GPU training
if not dist.is_initialized():
snake_case__ , snake_case__ : Union[str, Any] = self._main_retrieve(snake_case_ , snake_case_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(snake_case_ )
# distributed training
snake_case__ : Optional[int] = dist.get_world_size(group=self.process_group )
# gather logic
snake_case__ : str = None
if self._is_main():
snake_case__ : Any = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(snake_case_ )]
dist.gather(torch.tensor(snake_case_ ) , dst=0 , gather_list=snake_case_ , group=self.process_group )
# scatter logic
snake_case__ : Union[str, Any] = question_hidden_states.shape[0]
snake_case__ : List[str] = []
snake_case__ : Dict = []
if self._is_main():
assert len(snake_case_ ) == world_size
snake_case__ , snake_case__ : Union[str, Any] = self._main_retrieve(torch.cat(snake_case_ ).numpy() , snake_case_ )
snake_case__ , snake_case__ : Dict = torch.tensor(snake_case_ ), torch.tensor(snake_case_ )
snake_case__ : Union[str, Any] = self._chunk_tensor(snake_case_ , snake_case_ )
snake_case__ : Union[str, Any] = self._chunk_tensor(snake_case_ , snake_case_ )
snake_case__ : Union[str, Any] = self._scattered(snake_case_ , [n_queries, n_docs] , target_type=torch.intaa )
snake_case__ : Dict = self._scattered(snake_case_ , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(snake_case_ )
| 43 | 0 |
from timeit import timeit
def _a ( SCREAMING_SNAKE_CASE : Any ) -> List[str]:
"""simple docstring"""
if number < 0:
raise ValueError('the value of input must not be negative' )
__lowerCAmelCase: Optional[Any] = 0
while number:
number &= number - 1
result += 1
return result
def _a ( SCREAMING_SNAKE_CASE : List[Any] ) -> int:
"""simple docstring"""
if number < 0:
raise ValueError('the value of input must not be negative' )
__lowerCAmelCase: Union[str, Any] = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def _a ( ) -> List[str]:
"""simple docstring"""
def do_benchmark(SCREAMING_SNAKE_CASE : Optional[Any] ) -> None:
__lowerCAmelCase: List[Any] = 'import __main__ as z'
print(f'''Benchmark when {number = }:''' )
print(f'''{get_set_bits_count_using_modulo_operator(SCREAMING_SNAKE_CASE ) = }''' )
__lowerCAmelCase: List[str] = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=SCREAMING_SNAKE_CASE )
print(f'''timeit() runs in {timing} seconds''' )
print(f'''{get_set_bits_count_using_brian_kernighans_algorithm(SCREAMING_SNAKE_CASE ) = }''' )
__lowerCAmelCase: Tuple = timeit(
'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' , setup=SCREAMING_SNAKE_CASE , )
print(f'''timeit() runs in {timing} seconds''' )
for number in (25, 37, 58, 0):
do_benchmark(SCREAMING_SNAKE_CASE )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 322 | """simple docstring"""
UpperCAmelCase__ = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
UpperCAmelCase__ = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 1_2,
"""Pm""": 1_5,
"""Em""": 1_8,
"""Zm""": 2_1,
"""Ym""": 2_4,
}
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = from_type.lower().strip("""s""" )
_UpperCAmelCase = to_type.lower().strip("""s""" )
_UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase )
_UpperCAmelCase = UNIT_SYMBOL.get(lowercase ,lowercase )
if from_sanitized not in METRIC_CONVERSION:
_UpperCAmelCase = (
f'''Invalid \'from_type\' value: {from_type!r}.\n'''
f'''Conversion abbreviations are: {", ".join(lowercase )}'''
)
raise ValueError(lowercase )
if to_sanitized not in METRIC_CONVERSION:
_UpperCAmelCase = (
f'''Invalid \'to_type\' value: {to_type!r}.\n'''
f'''Conversion abbreviations are: {", ".join(lowercase )}'''
)
raise ValueError(lowercase )
_UpperCAmelCase = METRIC_CONVERSION[from_sanitized]
_UpperCAmelCase = METRIC_CONVERSION[to_sanitized]
_UpperCAmelCase = 1
if from_exponent > to_exponent:
_UpperCAmelCase = from_exponent - to_exponent
else:
_UpperCAmelCase = -(to_exponent - from_exponent)
return value * pow(10 ,lowercase )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 289 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : Optional[Any] = "▁"
_lowercase : Any = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
_lowercase : Union[str, Any] = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
_lowercase : str = {"vinai/bartpho-syllable": 1_0_2_4}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ['input_ids', 'attention_mask']
def __init__( self : str, lowerCamelCase : Dict, lowerCamelCase : Tuple, lowerCamelCase : Optional[Any]="<s>", lowerCamelCase : List[Any]="</s>", lowerCamelCase : Optional[int]="</s>", lowerCamelCase : int="<s>", lowerCamelCase : List[Any]="<unk>", lowerCamelCase : Dict="<pad>", lowerCamelCase : Tuple="<mask>", lowerCamelCase : Optional[Dict[str, Any]] = None, **lowerCamelCase : Dict, )-> None:
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ : str =AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else mask_token
lowerCamelCase__ : int ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase, eos_token=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, cls_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token=lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase, )
lowerCamelCase__ : Optional[Any] =vocab_file
lowerCamelCase__ : Any =monolingual_vocab_file
lowerCamelCase__ : str =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowerCamelCase__ : Any ={}
lowerCamelCase__ : Optional[Any] =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
lowerCamelCase__ : Optional[int] =cnt
cnt += 1
with open(lowerCamelCase, '''r''', encoding='''utf-8''' ) as f:
for line in f.readlines():
lowerCamelCase__ : List[str] =line.strip().split()[0]
lowerCamelCase__ : Dict =len(self.fairseq_tokens_to_ids )
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
lowerCamelCase__ : Tuple =len(self.fairseq_tokens_to_ids )
lowerCamelCase__ : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : List[str] )-> Optional[int]:
lowerCamelCase__ : Optional[Any] =self.__dict__.copy()
lowerCamelCase__ : Optional[int] =None
lowerCamelCase__ : int =self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[Any], lowerCamelCase : Tuple )-> Any:
lowerCamelCase__ : Optional[int] =d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowerCamelCase__ : str ={}
lowerCamelCase__ : str =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def snake_case ( self : Dict, lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None )-> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ : Optional[Any] =[self.cls_token_id]
lowerCamelCase__ : Optional[int] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case ( self : Optional[Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None, lowerCamelCase : bool = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase, token_ids_a=lowerCamelCase, already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def snake_case ( self : Dict, lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None )-> List[int]:
lowerCamelCase__ : Optional[Any] =[self.sep_token_id]
lowerCamelCase__ : List[Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case ( self : int )-> Optional[int]:
return len(self.fairseq_ids_to_tokens )
def snake_case ( self : Optional[int] )-> Optional[int]:
lowerCamelCase__ : Optional[Any] ={self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case ( self : Optional[int], lowerCamelCase : str )-> List[str]:
return self.sp_model.encode(lowerCamelCase, out_type=lowerCamelCase )
def snake_case ( self : int, lowerCamelCase : Optional[Any] )-> List[str]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def snake_case ( self : Any, lowerCamelCase : Optional[int] )-> Optional[int]:
return self.fairseq_ids_to_tokens[index]
def snake_case ( self : int, lowerCamelCase : Any )-> Optional[int]:
lowerCamelCase__ : List[str] =''''''.join(lowerCamelCase ).replace(lowerCamelCase, ''' ''' ).strip()
return out_string
def snake_case ( self : Union[str, Any], lowerCamelCase : str, lowerCamelCase : Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ : Dict =os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase__ : Tuple =os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''], )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase, '''wb''' ) as fi:
lowerCamelCase__ : Any =self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file, lowerCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowerCamelCase, '''w''', encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F'''{str(lowerCamelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 368 |
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def snake_case__ ( __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int , __lowerCamelCase : float = 1 , __lowerCamelCase : float = 1 , __lowerCamelCase : float = 1.0e4 , __lowerCamelCase : bool = False , __lowerCamelCase : float = 1.0 , ):
"""simple docstring"""
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
lowerCamelCase__ : Any =float(embedding_dim // 2 )
lowerCamelCase__ : List[str] =math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
lowerCamelCase__ : int =min_timescale * jnp.exp(jnp.arange(__lowerCamelCase , dtype=jnp.floataa ) * -log_timescale_increment )
lowerCamelCase__ : Tuple =jnp.expand_dims(__lowerCamelCase , 1 ) * jnp.expand_dims(__lowerCamelCase , 0 )
# scale embeddings
lowerCamelCase__ : List[str] =scale * emb
if flip_sin_to_cos:
lowerCamelCase__ : int =jnp.concatenate([jnp.cos(__lowerCamelCase ), jnp.sin(__lowerCamelCase )] , axis=1 )
else:
lowerCamelCase__ : List[str] =jnp.concatenate([jnp.sin(__lowerCamelCase ), jnp.cos(__lowerCamelCase )] , axis=1 )
lowerCamelCase__ : str =jnp.reshape(__lowerCamelCase , [jnp.shape(__lowerCamelCase )[0], embedding_dim] )
return signal
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
_a = 3_2
_a = jnp.floataa
@nn.compact
def __call__( self : Optional[Any], lowerCamelCase : int )-> Any:
lowerCamelCase__ : Optional[Any] =nn.Dense(self.time_embed_dim, dtype=self.dtype, name='''linear_1''' )(lowerCamelCase )
lowerCamelCase__ : List[str] =nn.silu(lowerCamelCase )
lowerCamelCase__ : Any =nn.Dense(self.time_embed_dim, dtype=self.dtype, name='''linear_2''' )(lowerCamelCase )
return temb
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
_a = 3_2
_a = False
_a = 1
@nn.compact
def __call__( self : Any, lowerCamelCase : int )-> int:
return get_sinusoidal_embeddings(
lowerCamelCase, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift )
| 272 | 0 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
_snake_case = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if got_ver is None or want_ver is None:
raise ValueError(
F"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
F" reinstalling {pkg}." )
if not ops[op](version.parse(_lowerCamelCase ) , version.parse(_lowerCamelCase ) ):
raise ImportError(
F"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" )
def A ( _lowerCamelCase , _lowerCamelCase = None ):
'''simple docstring'''
_lowerCAmelCase : List[str] = F"\n{hint}" if hint is not None else ""
# non-versioned check
if re.match(r"^[\w_\-\d]+$" , _lowerCamelCase ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = requirement, None, None
else:
_lowerCAmelCase : Optional[int] = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)" , _lowerCamelCase )
if not match:
raise ValueError(
"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
F" got {requirement}" )
_lowerCAmelCase , _lowerCAmelCase : Dict = match[0]
_lowerCAmelCase : Any = want_full.split("," ) # there could be multiple requirements
_lowerCAmelCase : Optional[int] = {}
for w in want_range:
_lowerCAmelCase : Any = re.findall(r"^([\s!=<>]{1,2})(.+)" , _lowerCamelCase )
if not match:
raise ValueError(
"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
F" but got {requirement}" )
_lowerCAmelCase , _lowerCAmelCase : Tuple = match[0]
_lowerCAmelCase : Union[str, Any] = want_ver
if op not in ops:
raise ValueError(F"{requirement}: need one of {list(ops.keys() )}, but got {op}" )
# special case
if pkg == "python":
_lowerCAmelCase : Tuple = ".".join([str(_lowerCamelCase ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return
# check if any version is installed
try:
_lowerCAmelCase : Any = importlib.metadata.version(_lowerCamelCase )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F"The '{requirement}' distribution was not found and is required by this application. {hint}" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
return require_version(_lowerCamelCase , _lowerCamelCase )
| 36 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
_UpperCamelCase = get_logger(__name__)
class _A :
    '''Dummy-data download manager: mirrors a download manager's API but serves files
    from a local `dummy_data.zip` fixture instead of the network.

    NOTE(review): automated renaming has damaged this class.  The three class
    attributes below share one name (only the last assignment survives), every
    method/property is named `__A` (each definition shadows the previous), and the
    `__UpperCAmelCase : ... = ...` lines inside method bodies are dead local
    assignments where `self.<attr> = ...` writes were intended; their right-hand
    sides reference the original parameter names (`dataset_name`, `cache_dir`,
    `config`, ...), which no longer exist.  The duplicate `__UpperCAmelCase`
    parameter names in `__init__` are additionally a Python SyntaxError, so the
    class cannot be imported as written.
    '''
    _SCREAMING_SNAKE_CASE : Dict = "dummy_data"
    _SCREAMING_SNAKE_CASE : int = "datasets"
    _SCREAMING_SNAKE_CASE : Union[str, Any] = False
    def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , ) -> List[Any]:
        '''Record dataset name, cache dir, config and download options for later use.'''
        __UpperCAmelCase : int = 0
        __UpperCAmelCase : Tuple = dataset_name
        __UpperCAmelCase : List[Any] = cache_dir
        __UpperCAmelCase : List[Any] = use_local_dummy_data
        __UpperCAmelCase : str = config
        # download_callbacks take a single url as input
        __UpperCAmelCase : List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        __UpperCAmelCase : Tuple = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        __UpperCAmelCase : Dict = str(__UpperCAmelCase )
        # to be downloaded
        __UpperCAmelCase : Any = None
        __UpperCAmelCase : List[str] = None
    @property
    def __A ( self ) -> Dict:
        '''Lazily download/extract the dummy data and cache the resulting path.'''
        if self._dummy_file is None:
            __UpperCAmelCase : Dict = self.download_dummy_data()
        return self._dummy_file
    @property
    def __A ( self ) -> Any:
        '''Relative folder of the dummy data: dummy/<config>/<version> or dummy/<version>.'''
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("""dummy""" , self.config.name , self.version_name )
        # structure is dummy / version_name
        return os.path.join("""dummy""" , self.version_name )
    @property
    def __A ( self ) -> Optional[Any]:
        '''Relative path of the dummy_data.zip archive inside the dummy data folder.'''
        return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
    def __A ( self ) -> List[Any]:
        '''Fetch the dummy zip (locally or from GitHub), extract it, return its path.'''
        __UpperCAmelCase : Tuple = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        __UpperCAmelCase : Tuple = cached_path(
            __UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=__UpperCAmelCase , force_extract=__UpperCAmelCase )
        return os.path.join(__UpperCAmelCase , self.dummy_file_name )
    @property
    def __A ( self ) -> Any:
        '''Path of the dummy zip inside the local datasets scripts checkout.'''
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
    @property
    def __A ( self ) -> List[Any]:
        '''GitHub URL of the dummy zip (memoized via _bucket_url).'''
        if self._bucket_url is None:
            __UpperCAmelCase : Optional[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
        return self._bucket_url
    @property
    def __A ( self ) -> Dict:
        '''Manual dir for the dummy data: the dummy file itself if a dir, else its parent.'''
        # return full path if its a dir
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
    def __A ( self , __UpperCAmelCase , *__UpperCAmelCase ) -> Optional[int]:
        '''Map a URL (str, dict or list/tuple) onto the corresponding dummy-data path(s).'''
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            __UpperCAmelCase : Dict = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            __UpperCAmelCase : Optional[int] = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            return self.create_dummy_data_dict(__UpperCAmelCase , __UpperCAmelCase )
        elif isinstance(__UpperCAmelCase , (list, tuple) ):
            return self.create_dummy_data_list(__UpperCAmelCase , __UpperCAmelCase )
        else:
            return self.create_dummy_data_single(__UpperCAmelCase , __UpperCAmelCase )
    def __A ( self , __UpperCAmelCase , *__UpperCAmelCase ) -> Dict:
        '''Delegates to download_and_extract (downloading is the same on dummy data).'''
        return self.download_and_extract(__UpperCAmelCase )
    def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
        '''Delegates to download_and_extract (custom-download variant).'''
        return self.download_and_extract(__UpperCAmelCase )
    def __A ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
        '''Extraction is a no-op on dummy data; NOTE(review): `path` is undefined here
        (the parameter was renamed), so this raises NameError as written.'''
        return path
    def __A ( self ) -> List[Any]:
        '''No sizes/checksums are recorded for dummy data.'''
        return {}
    def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
        '''Build {key: dummy path} for a dict of URLs, de-duplicating colliding values.'''
        __UpperCAmelCase : str = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
                    for single_url in single_urls:
                        download_callback(__UpperCAmelCase )
                else:
                    __UpperCAmelCase : Union[str, Any] = single_urls
                    download_callback(__UpperCAmelCase )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
                __UpperCAmelCase : str = [os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) ) for x in single_urls]
            else:
                __UpperCAmelCase : List[str] = single_urls
                __UpperCAmelCase : Any = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) )
            __UpperCAmelCase : Union[str, Any] = value
        # make sure that values are unique
        if all(isinstance(__UpperCAmelCase , __UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            __UpperCAmelCase : Tuple = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
        '''Build a list of dummy paths for a list/tuple of URLs.'''
        __UpperCAmelCase : Optional[Any] = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        __UpperCAmelCase : Tuple = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , __UpperCAmelCase ) ) for url in data_url )
        __UpperCAmelCase : str = all(
            url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            __UpperCAmelCase : int = [data_url[0]] * len(__UpperCAmelCase )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(__UpperCAmelCase )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            __UpperCAmelCase : List[str] = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
            dummy_data_list.append(__UpperCAmelCase )
        return dummy_data_list
    def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
        '''Build the dummy path for a single URL string.'''
        for download_callback in self.download_callbacks:
            download_callback(__UpperCAmelCase )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        __UpperCAmelCase : List[str] = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
        if os.path.exists(__UpperCAmelCase ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def __A ( self ) -> Tuple:
        '''Intentional no-op, kept for download-manager API compatibility.'''
        pass
    def __A ( self ) -> int:
        '''Intentional no-op, kept for download-manager API compatibility.'''
        pass
    def __A ( self , __UpperCAmelCase ) -> Any:
        '''Yield (relative posix path, binary file object) pairs for archive members.'''
        def _iter_archive_members(__UpperCAmelCase ):
            # this preserves the order of the members inside the ZIP archive
            __UpperCAmelCase : Dict = Path(self.dummy_file ).parent
            __UpperCAmelCase : Dict = path.relative_to(__UpperCAmelCase )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                __UpperCAmelCase : List[str] = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix() ):
                    yield dummy_parent_path.joinpath(__UpperCAmelCase )
        __UpperCAmelCase : Any = Path(__UpperCAmelCase )
        __UpperCAmelCase : int = _iter_archive_members(__UpperCAmelCase ) if self.use_local_dummy_data else path.rglob("""*""" )
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
                yield file_path.relative_to(__UpperCAmelCase ).as_posix(), file_path.open("""rb""" )
    def __A ( self , __UpperCAmelCase ) -> Dict:
        '''Yield file paths under the given path(s), skipping hidden/dunder entries
        and walking directories in sorted order.'''
        if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            __UpperCAmelCase : List[Any] = [paths]
        for path in paths:
            if os.path.isfile(__UpperCAmelCase ):
                if os.path.basename(__UpperCAmelCase ).startswith((""".""", """__""") ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(__UpperCAmelCase ):
                    if os.path.basename(__UpperCAmelCase ).startswith((""".""", """__""") ):
                        continue
                    dirnames.sort()
                    for filename in sorted(__UpperCAmelCase ):
                        if filename.startswith((""".""", """__""") ):
                            continue
                        yield os.path.join(__UpperCAmelCase , __UpperCAmelCase )
| 254 | 0 |
from __future__ import annotations
from typing import Any
class lowercase_ :
    '''Circular queue implemented over a doubly linked list of fixed capacity.

    NOTE(review): mangling has made this class non-functional — all seven methods
    are named `lowerCamelCase_` (each def shadows the previous, so only the last
    survives), every `UpperCamelCase_ = ...` line is a dead local assignment where
    a `self.front` / `self.rear` / node-pointer write was intended, and methods
    call `self.create_linked_list`, `self.is_empty`, `self.check_is_full` and
    `self.check_can_perform_operation` — names that no longer exist.
    '''
    def __init__( self , __UpperCamelCase = 6 ):
        '''Create a queue backed by a circular linked list of the given length.'''
        UpperCamelCase_ = None
        UpperCamelCase_ = None
        self.create_linked_list(__UpperCamelCase )
    def lowerCamelCase_ ( self , __UpperCamelCase ):
        '''Build the circular doubly linked list of the requested length.'''
        UpperCamelCase_ = Node()
        UpperCamelCase_ = current_node
        UpperCamelCase_ = current_node
        UpperCamelCase_ = current_node
        for _ in range(1 , __UpperCamelCase ):
            UpperCamelCase_ = Node()
            UpperCamelCase_ = current_node
            UpperCamelCase_ = previous_node
            UpperCamelCase_ = current_node
        UpperCamelCase_ = self.front
        UpperCamelCase_ = previous_node
    def lowerCamelCase_ ( self ):
        '''Return True when the queue holds no data.'''
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )
    def lowerCamelCase_ ( self ):
        '''Return the front element without removing it.'''
        self.check_can_perform_operation()
        return self.front.data if self.front else None
    def lowerCamelCase_ ( self , __UpperCamelCase ):
        '''Enqueue a value at the rear (after checking the queue is not full).'''
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            UpperCamelCase_ = self.rear.next
        if self.rear:
            UpperCamelCase_ = data
    def lowerCamelCase_ ( self ):
        '''Dequeue and return the front element.'''
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            UpperCamelCase_ = self.front.data
            UpperCamelCase_ = None
            return data
        UpperCamelCase_ = self.front
        UpperCamelCase_ = old_front.next
        UpperCamelCase_ = old_front.data
        UpperCamelCase_ = None
        return data
    def lowerCamelCase_ ( self ):
        '''Raise when the queue is empty.'''
        if self.is_empty():
            raise Exception("""Empty Queue""" )
    def lowerCamelCase_ ( self ):
        '''Raise when the queue is full (rear's next wraps to front).'''
        if self.rear and self.rear.next == self.front:
            raise Exception("""Full Queue""" )
class lowercase_ :
    '''Linked-list node.

    NOTE(review): re-uses the mangled name `lowercase_` (shadowing the queue class
    above).  The three locals in `__init__` were presumably `self.data`,
    `self.prev` and `self.next` — attributes the queue methods dereference — but as
    written nothing is ever assigned, so `Node()` instances are empty.
    '''
    def __init__( self ):
        UpperCamelCase_ = None
        UpperCamelCase_ = None
        UpperCamelCase_ = None
if __name__ == "__main__":
    import doctest
    # run this module's doctests when executed as a script
    doctest.testmod()
| 261 |
from __future__ import annotations
def lowerCamelCase__ ( a__ : list[list[int]] ) -> int:
    """Return the minimum path sum from the top-left to the bottom-right cell of a
    non-empty grid, moving only right or down.

    The grid is updated in place: after the call, ``a__[i][j]`` holds the minimum
    cost of reaching cell ``(i, j)``, and the bottom-right value is returned.

    Fix: the body referenced an undefined name ``matrix`` (the parameter had been
    renamed to ``a__`` without updating the body), so every call raised NameError.
    Aliasing the parameter restores the original dynamic program.

    Raises:
        IndexError: if the grid or its first row is empty (unchanged behavior).
    """
    matrix = a__  # restore the name the DP below was written against
    # preprocessing the first row: cells only reachable from the left
    for i in range(1 , len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column: cells only reachable from above
    for i in range(1 , len(a__ ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position: best of top / left neighbour
    for i in range(1 , len(a__ ) ):
        for j in range(1 , len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
    return matrix[-1][-1]
if __name__ == "__main__":
    import doctest
    # run this module's doctests when executed as a script
    doctest.testmod()
| 261 | 1 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def UpperCamelCase_( lowerCamelCase_ ) -> str:
    """Return the anagram signature of a word: its characters in sorted order,
    joined into a single string.  Two words are anagrams iff their signatures
    are equal."""
    ordered_chars = sorted(lowerCamelCase_ )
    return "".join(ordered_chars )
def UpperCamelCase_( lowerCamelCase_ ) -> list[str]:
    '''Return every word sharing this word's anagram signature.

    NOTE(review): mangling broke this — `signature` and `word_by_signature` are
    not defined under those names in this chunk (the signature helper above was
    renamed to `UpperCamelCase_`, which this very def now shadows), so calling it
    raises NameError.
    '''
    return word_by_signature[signature(lowerCamelCase_ )]
# NOTE(review): mangled — the three module-level names below all became
# `SCREAMING_SNAKE_CASE`, so `data`, `word_list` and `word_by_signature` (and the
# `signature` / `anagram` helpers, also renamed above) are undefined when this runs.
SCREAMING_SNAKE_CASE : str = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
SCREAMING_SNAKE_CASE : List[str] = sorted({word.strip().lower() for word in data.splitlines()})
SCREAMING_SNAKE_CASE : int = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    # write every anagram group with more than one member to anagrams.txt
    SCREAMING_SNAKE_CASE : List[Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
| 21 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
# NOTE(review): both module-level constants below were renamed to the same
# identifier, so the pretrained-archive list is immediately overwritten by the
# docstring name.  Code further down references `_CONFIG_FOR_DOC` and
# `UPERNET_INPUTS_DOCSTRING`, which therefore no longer resolve.
__UpperCamelCase = [
    '''openmmlab/upernet-convnext-tiny''',
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
__UpperCamelCase = '''UperNetConfig'''
class lowerCAmelCase ( nn.Module ):
    '''Convolution + batch-norm + ReLU building block used throughout the UperNet head.

    NOTE(review): mangling broke this whole section — all five module classes share
    the name `lowerCAmelCase` (each shadows the previous, and cross-references like
    `UperNetConvModule` point at the original names, which no longer exist), every
    `SCREAMING_SNAKE_CASE = ...` line is a dead local assignment where a
    `self.<attr>` write was intended (here `forward` reads `self.conv`,
    `self.batch_norm`, `self.activation`, which are never set), duplicate
    `lowerCAmelCase__` parameter names are a SyntaxError, and `nn.Convad` /
    `nn.BatchNormad` look like damaged `nn.Conv2d` / `nn.BatchNorm2d` — TODO confirm
    against the original file.
    '''
    def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 0 , lowerCAmelCase__ = False , lowerCAmelCase__ = 1 , ) -> None:
        super().__init__()
        SCREAMING_SNAKE_CASE = nn.Convad(
            in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , padding=lowerCAmelCase__ , bias=lowerCAmelCase__ , dilation=lowerCAmelCase__ , )
        SCREAMING_SNAKE_CASE = nn.BatchNormad(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE = nn.ReLU()
    def __A ( self , lowerCAmelCase__ ) -> torch.Tensor:
        # forward: conv -> batch norm -> ReLU
        SCREAMING_SNAKE_CASE = self.conv(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE = self.batch_norm(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE = self.activation(lowerCAmelCase__ )
        return output
class lowerCAmelCase ( nn.Module ):
    '''Pyramid-pooling block: adaptive average pool followed by a 1x1 conv module.

    NOTE(review): same mangling as the sibling classes — the list below was
    presumably `self.layers`, which both `__init__` and `forward` read but which
    is never assigned as written.
    '''
    def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
        super().__init__()
        SCREAMING_SNAKE_CASE = [
            nn.AdaptiveAvgPoolad(lowerCAmelCase__ ),
            UperNetConvModule(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 ),
        ]
        for i, layer in enumerate(self.layers ):
            self.add_module(str(lowerCAmelCase__ ) , lowerCAmelCase__ )
    def __A ( self , lowerCAmelCase__ ) -> torch.Tensor:
        # run the input sequentially through pool + conv module
        SCREAMING_SNAKE_CASE = input
        for layer in self.layers:
            SCREAMING_SNAKE_CASE = layer(lowerCAmelCase__ )
        return hidden_state
class lowerCAmelCase ( nn.Module ):
    '''Pyramid Pooling Module: one pooling block per scale, each output upsampled
    (bilinear) back to the input's spatial size.

    NOTE(review): same mangling as the sibling classes — the assignments below were
    distinct attributes (pool_scales, align_corners, in_channels, channels, blocks)
    that `forward` and callers read but which are never set as written.
    '''
    def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
        super().__init__()
        SCREAMING_SNAKE_CASE = pool_scales
        SCREAMING_SNAKE_CASE = align_corners
        SCREAMING_SNAKE_CASE = in_channels
        SCREAMING_SNAKE_CASE = channels
        SCREAMING_SNAKE_CASE = []
        for i, pool_scale in enumerate(lowerCAmelCase__ ):
            SCREAMING_SNAKE_CASE = UperNetPyramidPoolingBlock(pool_scale=lowerCAmelCase__ , in_channels=lowerCAmelCase__ , channels=lowerCAmelCase__ )
            self.blocks.append(lowerCAmelCase__ )
            self.add_module(str(lowerCAmelCase__ ) , lowerCAmelCase__ )
    def __A ( self , lowerCAmelCase__ ) -> List[torch.Tensor]:
        # apply each pooling block, then upsample back to the input's H x W
        SCREAMING_SNAKE_CASE = []
        for ppm in self.blocks:
            SCREAMING_SNAKE_CASE = ppm(lowerCAmelCase__ )
            SCREAMING_SNAKE_CASE = nn.functional.interpolate(
                lowerCAmelCase__ , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners )
            ppm_outs.append(lowerCAmelCase__ )
        return ppm_outs
class lowerCAmelCase ( nn.Module ):
    '''UperNet decode head: PSP module over the last feature map plus an FPN over the
    remaining levels, fused and classified per pixel.

    NOTE(review): same mangling as the sibling classes — every
    `SCREAMING_SNAKE_CASE = ...` below discards its value into one local; the
    attributes read later (self.pool_scales, self.in_channels, self.channels,
    self.align_corners, self.classifier, self.psp_modules, self.bottleneck,
    self.lateral_convs, self.fpn_convs, self.fpn_bottleneck) are never set.
    '''
    def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
        super().__init__()
        SCREAMING_SNAKE_CASE = config
        SCREAMING_SNAKE_CASE = config.pool_scales  # e.g. (1, 2, 3, 6)
        SCREAMING_SNAKE_CASE = in_channels
        SCREAMING_SNAKE_CASE = config.hidden_size
        SCREAMING_SNAKE_CASE = False
        SCREAMING_SNAKE_CASE = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
        # PSP Module
        SCREAMING_SNAKE_CASE = UperNetPyramidPoolingModule(
            self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
        SCREAMING_SNAKE_CASE = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
        # FPN Module
        SCREAMING_SNAKE_CASE = nn.ModuleList()
        SCREAMING_SNAKE_CASE = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            SCREAMING_SNAKE_CASE = UperNetConvModule(lowerCAmelCase__ , self.channels , kernel_size=1 )
            SCREAMING_SNAKE_CASE = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
            self.lateral_convs.append(lowerCAmelCase__ )
            self.fpn_convs.append(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE = UperNetConvModule(
            len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
    def __A ( self ) -> int:
        '''Initialize all submodule weights via _init_weights.'''
        self.apply(self._init_weights )
    def __A ( self , lowerCAmelCase__ ) -> Tuple:
        '''Init conv weights from N(0, initializer_range); zero the biases.'''
        if isinstance(lowerCAmelCase__ , nn.Convad ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def __A ( self , lowerCAmelCase__ ) -> Optional[int]:
        '''Run the PSP module over the last feature map; bottleneck the concatenation.'''
        SCREAMING_SNAKE_CASE = inputs[-1]
        SCREAMING_SNAKE_CASE = [x]
        psp_outs.extend(self.psp_modules(lowerCAmelCase__ ) )
        SCREAMING_SNAKE_CASE = torch.cat(lowerCAmelCase__ , dim=1 )
        SCREAMING_SNAKE_CASE = self.bottleneck(lowerCAmelCase__ )
        return output
    def __A ( self , lowerCAmelCase__ ) -> torch.Tensor:
        '''Fuse lateral features top-down (FPN), concatenate all levels, classify.'''
        # build laterals
        SCREAMING_SNAKE_CASE = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
        laterals.append(self.psp_forward(lowerCAmelCase__ ) )
        # build top-down path
        SCREAMING_SNAKE_CASE = len(lowerCAmelCase__ )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            SCREAMING_SNAKE_CASE = laterals[i - 1].shape[2:]
            SCREAMING_SNAKE_CASE = laterals[i - 1] + nn.functional.interpolate(
                laterals[i] , size=lowerCAmelCase__ , mode='bilinear' , align_corners=self.align_corners )
        # build outputs
        SCREAMING_SNAKE_CASE = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
        # append psp feature
        fpn_outs.append(laterals[-1] )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            SCREAMING_SNAKE_CASE = nn.functional.interpolate(
                fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='bilinear' , align_corners=self.align_corners )
        SCREAMING_SNAKE_CASE = torch.cat(lowerCAmelCase__ , dim=1 )
        SCREAMING_SNAKE_CASE = self.fpn_bottleneck(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE = self.classifier(lowerCAmelCase__ )
        return output
class lowerCAmelCase ( nn.Module ):
    '''Auxiliary FCN head: a stack of conv modules over one backbone level, then a
    1x1 classifier, optionally concatenating the input features back in.

    NOTE(review): same mangling as the sibling classes — the assignments below were
    attribute writes (self.config, self.in_channels, self.channels, self.num_convs,
    self.concat_input, self.in_index, self.convs, self.conv_cat, self.classifier),
    which the methods read but which are never set as written.
    '''
    def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = 2 , lowerCAmelCase__ = 3 , lowerCAmelCase__ = 1 ) -> None:
        super().__init__()
        SCREAMING_SNAKE_CASE = config
        SCREAMING_SNAKE_CASE = config.auxiliary_in_channels
        SCREAMING_SNAKE_CASE = config.auxiliary_channels
        SCREAMING_SNAKE_CASE = config.auxiliary_num_convs
        SCREAMING_SNAKE_CASE = config.auxiliary_concat_input
        SCREAMING_SNAKE_CASE = in_index
        SCREAMING_SNAKE_CASE = (kernel_size // 2) * dilation
        SCREAMING_SNAKE_CASE = []
        convs.append(
            UperNetConvModule(
                self.in_channels , self.channels , kernel_size=lowerCAmelCase__ , padding=lowerCAmelCase__ , dilation=lowerCAmelCase__ ) )
        for i in range(self.num_convs - 1 ):
            convs.append(
                UperNetConvModule(
                    self.channels , self.channels , kernel_size=lowerCAmelCase__ , padding=lowerCAmelCase__ , dilation=lowerCAmelCase__ ) )
        if self.num_convs == 0:
            SCREAMING_SNAKE_CASE = nn.Identity()
        else:
            SCREAMING_SNAKE_CASE = nn.Sequential(*lowerCAmelCase__ )
        if self.concat_input:
            SCREAMING_SNAKE_CASE = UperNetConvModule(
                self.in_channels + self.channels , self.channels , kernel_size=lowerCAmelCase__ , padding=kernel_size // 2 )
        SCREAMING_SNAKE_CASE = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
    def __A ( self ) -> Dict:
        '''Initialize all submodule weights via _init_weights.'''
        self.apply(self._init_weights )
    def __A ( self , lowerCAmelCase__ ) -> Dict:
        '''Init conv weights from N(0, initializer_range); zero the biases.'''
        if isinstance(lowerCAmelCase__ , nn.Convad ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def __A ( self , lowerCAmelCase__ ) -> torch.Tensor:
        '''Select the configured feature level, run the conv stack, classify.'''
        # just take the relevant feature maps
        SCREAMING_SNAKE_CASE = encoder_hidden_states[self.in_index]
        SCREAMING_SNAKE_CASE = self.convs(lowerCAmelCase__ )
        if self.concat_input:
            SCREAMING_SNAKE_CASE = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
        SCREAMING_SNAKE_CASE = self.classifier(lowerCAmelCase__ )
        return output
class lowerCAmelCase ( lowerCamelCase_ ):
    '''Pretrained-model wrapper: weight init, backbone/head init, gradient checkpointing flag.

    NOTE(review): mangled — the base name `lowerCamelCase_` is undefined here (the
    imports above bring in `PreTrainedModel`, presumably the original base — TODO
    confirm), and the three class attributes below all share one name, so only the
    last assignment survives.
    '''
    SCREAMING_SNAKE_CASE_ : int = UperNetConfig
    SCREAMING_SNAKE_CASE_ : Optional[Any] = """pixel_values"""
    SCREAMING_SNAKE_CASE_ : Optional[int] = True
    def __A ( self , lowerCAmelCase__ ) -> List[str]:
        '''Initialize weights of backbone and both heads on a matching module.'''
        if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()
    def __A ( self ) -> int:
        '''Initialize weights of this model's backbone and both heads.'''
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()
    def __A ( self , lowerCAmelCase__ , lowerCAmelCase__=False ) -> str:
        '''Toggle gradient checkpointing on a matching module.'''
        if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
            SCREAMING_SNAKE_CASE = value
__UpperCamelCase = R'''
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__UpperCamelCase = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""" , lowerCamelCase_ , )
class lowerCAmelCase ( lowerCamelCase_ ):
    '''UperNet semantic-segmentation model: backbone + UperNet decode head
    (+ optional FCN auxiliary head), producing per-pixel logits and, when labels
    are provided, a weighted main + auxiliary cross-entropy loss.

    NOTE(review): mangled — the decorator arguments and base class reference
    undefined names (`lowerCamelCase_`, `UPERNET_INPUTS_DOCSTRING`,
    `_CONFIG_FOR_DOC`, and a class-scope `lowerCAmelCase__`), and the
    `SCREAMING_SNAKE_CASE` locals shadow one another exactly as in the classes
    above, so the attributes/variables read later are never actually bound.
    '''
    def __init__( self , lowerCAmelCase__ ) -> Optional[int]:
        super().__init__(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE = AutoBackbone.from_config(config.backbone_config )
        # Semantic segmentation head(s)
        SCREAMING_SNAKE_CASE = UperNetHead(lowerCAmelCase__ , in_channels=self.backbone.channels )
        SCREAMING_SNAKE_CASE = UperNetFCNHead(lowerCAmelCase__ ) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length' ) )
    @replace_return_docstrings(output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC )
    def __A ( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ) -> Union[tuple, SemanticSegmenterOutput]:
        '''Forward pass: backbone features -> decode head -> logits upsampled to the
        input resolution; optional auxiliary logits and combined loss.'''
        SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
        SCREAMING_SNAKE_CASE = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        SCREAMING_SNAKE_CASE = output_attentions if output_attentions is not None else self.config.output_attentions
        SCREAMING_SNAKE_CASE = self.backbone.forward_with_filtered_kwargs(
            lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , output_attentions=lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE = outputs.feature_maps
        SCREAMING_SNAKE_CASE = self.decode_head(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE = nn.functional.interpolate(lowerCAmelCase__ , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE = None
        if self.auxiliary_head is not None:
            SCREAMING_SNAKE_CASE = self.auxiliary_head(lowerCAmelCase__ )
            SCREAMING_SNAKE_CASE = nn.functional.interpolate(
                lowerCAmelCase__ , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError('The number of labels should be greater than one' )
            else:
                # compute weighted loss
                SCREAMING_SNAKE_CASE = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                SCREAMING_SNAKE_CASE = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
                SCREAMING_SNAKE_CASE = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
                SCREAMING_SNAKE_CASE = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                SCREAMING_SNAKE_CASE = (logits,) + outputs[1:]
            else:
                SCREAMING_SNAKE_CASE = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 113 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class SCREAMING_SNAKE_CASE__ (__snake_case ):
    '''Iterable dataset wrapper that yields the elements of a backing sequence.

    NOTE(review): mangled — the base name `__snake_case` is undefined (the imports
    above bring in `IterableDataset`, presumably the original base — TODO confirm),
    `__init__` binds a throwaway local instead of `self.data` (which `__iter__`
    reads), and `data` itself is not a defined name: the parameter is `a`.  The
    helper functions below are all named `snake_case__` and shadow one another the
    same way.
    '''
    def __init__( self , a):
        lowercase__ : List[str] = data
    def __iter__( self):
        for element in self.data:
            yield element
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any]=True ):
    '''Create a 2-process Accelerator for the even-batches tests.

    NOTE(review): mangled — the Accelerator is bound to a discarded local, so the
    `accelerator` name used below (and returned) is undefined.
    '''
    lowercase__ : List[Any] = Accelerator(even_batches=SCREAMING_SNAKE_CASE_ )
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Accelerator , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : bool = False ):
    '''Build a (map-style or iterable) `range(n)` dataset and a prepared DataLoader.

    NOTE(review): mangled — the four parameters share one name (a SyntaxError),
    the `lowercase__` locals discard their values, and `iterable`, `accelerator`
    and `dl` are undefined names as written.
    '''
    if iterable:
        lowercase__ : Optional[Any] = DummyIterableDataset(torch.as_tensor(range(SCREAMING_SNAKE_CASE_ ) ) )
    else:
        lowercase__ : Any = TensorDataset(torch.as_tensor(range(SCREAMING_SNAKE_CASE_ ) ) )
    lowercase__ : Optional[Any] = DataLoader(SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
    lowercase__ : Optional[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
    return dl
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Accelerator , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : List[int] , ):
    '''Assert each process sees the expected per-batch sizes for a dataset/batch size.'''
    lowercase__ : List[Any] = create_dataloader(accelerator=SCREAMING_SNAKE_CASE_ , dataset_size=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
    lowercase__ : List[Any] = [len(batch[0] ) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def snake_case__ ( ):
    '''Default (even_batches=True): both processes see equal batch counts and sizes.'''
    lowercase__ : str = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        SCREAMING_SNAKE_CASE_ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def snake_case__ ( ):
    '''even_batches=False: the last process may receive fewer / smaller batches.'''
    lowercase__ : Union[str, Any] = create_accelerator(even_batches=SCREAMING_SNAKE_CASE_ )
    verify_dataloader_batch_sizes(
        SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
    verify_dataloader_batch_sizes(
        SCREAMING_SNAKE_CASE_ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def snake_case__ ( ):
    '''join_uneven_inputs lets processes with different batch counts train without hanging.'''
    lowercase__ : List[str] = create_accelerator(even_batches=SCREAMING_SNAKE_CASE_ )
    lowercase__ : Dict = torch.nn.Linear(1 , 1 )
    lowercase__ : Any = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
    lowercase__ : str = create_dataloader(SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 )
    lowercase__ : List[str] = []
    with accelerator.join_uneven_inputs([ddp_model] ):
        for batch_idx, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
            lowercase__ : str = ddp_model(batch[0].float() )
            lowercase__ : int = output.sum()
            loss.backward()
            batch_idxs.append(SCREAMING_SNAKE_CASE_ )
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
    '''join_uneven_inputs on a non-DDP setup should emit a "only supported for multi-GPU" warning.'''
    with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE_ ) as w:
        with accelerator.join_uneven_inputs([Mock()] ):
            pass
        assert issubclass(w[-1].category , SCREAMING_SNAKE_CASE_ )
        assert "only supported for multi-GPU" in str(w[-1].message )
def snake_case__ ( ):
    '''even_batches can be overridden inside join_uneven_inputs and is restored after.'''
    lowercase__ : int = True
    lowercase__ : Optional[Any] = False
    lowercase__ : Dict = create_accelerator(even_batches=SCREAMING_SNAKE_CASE_ )
    lowercase__ : List[Any] = torch.nn.Linear(1 , 1 )
    lowercase__ : Any = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
    lowercase__ : Tuple = create_dataloader(SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 )
    lowercase__ : Optional[int] = create_dataloader(SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 )
    with accelerator.join_uneven_inputs([ddp_model] , even_batches=SCREAMING_SNAKE_CASE_ ):
        lowercase__ : Tuple = train_dl.batch_sampler.even_batches
        lowercase__ : Union[str, Any] = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def snake_case__ ( ):
    '''Overriding even_batches only affects map-style dataloaders, not iterable ones.'''
    lowercase__ : Dict = True
    lowercase__ : List[str] = False
    lowercase__ : Dict = create_accelerator(even_batches=SCREAMING_SNAKE_CASE_ )
    lowercase__ : Optional[int] = torch.nn.Linear(1 , 1 )
    lowercase__ : str = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
    create_dataloader(SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 , iterable=SCREAMING_SNAKE_CASE_ )
    lowercase__ : List[str] = create_dataloader(SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 )
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore' )
        try:
            with accelerator.join_uneven_inputs([ddp_model] , even_batches=SCREAMING_SNAKE_CASE_ ):
                lowercase__ : Optional[Any] = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def snake_case__ ( ):
    '''Overriding even_batches with an iterable dataloader should warn (map-style only).'''
    lowercase__ : Tuple = create_accelerator()
    lowercase__ : Optional[Any] = torch.nn.Linear(1 , 1 )
    lowercase__ : Optional[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
    create_dataloader(SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 , iterable=SCREAMING_SNAKE_CASE_ )
    with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE_ ) as w:
        with accelerator.join_uneven_inputs([ddp_model] , even_batches=SCREAMING_SNAKE_CASE_ ):
            pass
        assert issubclass(w[-1].category , SCREAMING_SNAKE_CASE_ )
        assert "only supported for map-style datasets" in str(w[-1].message )
def snake_case__ ( ):
    '''Entry point: run the even-batches / join_uneven_inputs test suite.

    NOTE(review): mangled — all the `test_*` helpers called here were renamed to
    `snake_case__`, so none of these names (nor `accelerator`/`original_state`)
    resolve as written.
    '''
    lowercase__ : Optional[Any] = create_accelerator()
    accelerator.print('Test that even_batches variable ensures uniform batches across processes' )
    test_default_ensures_even_batch_sizes()
    accelerator.print('Run tests with even_batches disabled' )
    test_can_disable_even_batches()
    accelerator.print('Test joining uneven inputs' )
    test_can_join_uneven_inputs()
    accelerator.print('Test overriding even_batches when joining uneven inputs' )
    test_join_can_override_even_batches()
    accelerator.print('Test overriding even_batches for mixed dataloader types' )
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' )
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print('Test join with non DDP distributed raises warning' )
    lowercase__ : Dict = accelerator.state.distributed_type
    lowercase__ : Any = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(SCREAMING_SNAKE_CASE_ )
    lowercase__ : List[Any] = original_state
if __name__ == "__main__":
    # NOTE(review): `main` is undefined — the entry point above was renamed to
    # `snake_case__` by the mangling.
    main()
| 216 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SCREAMING_SNAKE_CASE__ (AbstractDatasetReader ):
    """Dataset reader that materializes a PySpark DataFrame as a `datasets` Dataset.

    NOTE(review): the obfuscated original named every parameter ``a`` (a
    SyntaxError) and subclassed the undefined ``__snake_case``; the signature
    below is reconstructed from the visible defaults and the upstream
    ``datasets.io.spark`` reader.
    """

    def __init__(
        self,
        df,
        split=None,
        features=None,
        streaming=True,
        cache_dir=None,
        keep_in_memory=False,
        working_dir=None,
        load_from_cache_file=True,
        file_format="arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        # The Spark builder does the actual conversion work.
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs,
        )

    def snake_case_ (self):
        """Return the dataset: streaming view, or download-and-prepare then load."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # Only force a re-download when the cache is explicitly disabled.
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format, )
        return self.builder.as_dataset(split=self.split)
| 216 | 1 |
"""simple docstring"""
class A_ :
    """Prefix-sum index over a list of integers.

    Builds the running-total table once in O(n); range-sum queries then run
    in O(1) and subarray-sum membership tests in O(n).
    """

    def __init__(self, array: list[int]):
        """Precompute ``prefix_sum[i] == sum(array[: i + 1])``."""
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return ``sum(array[start : end + 1])`` in O(1); both indices inclusive."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to ``target_sum``.

        Classic prefix-sum trick: a subarray ending at i sums to the target iff
        prefix[i] - target equals an earlier prefix (0 stands for the empty prefix).
        """
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
# Run this module's doctests when it is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod() | 126 |
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the shared SentencePiece test fixture.
# NOTE(review): upstream names this constant SAMPLE_VOCAB; the mangled name is
# kept because later code in this row may reference it.
lowerCAmelCase = get_tests_dir("""fixtures/spiece.model""")


@require_sentencepiece
@require_tokenizers
class A_ ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenizer test-suite for ALBERT (slow and fast implementations).

    Method names follow the ``test_*`` convention so unittest actually
    discovers them (the obfuscated original used a single repeated name).
    """

    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(lowerCAmelCase)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        """Return a (raw text, expected decoded text) pair for round-trip checks."""
        input_text = 'this is a test'
        output_text = 'this is a test'
        return input_text, output_text

    def test_convert_token_and_id(self):
        """``<pad>`` must map to id 0 and back."""
        token = '<pad>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<pad>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[-1], '▁eloquent')
        self.assertEqual(len(vocab_keys), 30_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)

    def test_rust_and_python_full_tokenizers(self):
        """Slow and fast tokenizers must agree on tokens and ids."""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(lowerCAmelCase, keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁this', '▁is', '▁a', '▁test'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1_289])
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9])
        # Accents are kept while tokenizing but 'é' is unknown when decoding ids.
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'], )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(lowerCAmelCase)
        text = tokenizer.encode('sequence builders')
        text_a = tokenizer.encode('multi-sequence build')
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='albert-base-v2', revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e', )
# | 126 | 1 |  (dataset-dump row delimiter, preserved from the original line)
import qiskit
def UpperCamelCase( qubits: int , classical_bits: int ) -> qiskit.result.counts.Counts:
    """Build a circuit, measure qubit 0 into classical bit 0, return the counts.

    The obfuscated original declared two parameters with the same name (a
    SyntaxError) and passed the two ints to ``qiskit.execute``; the circuit
    must be executed on the simulator backend instead.

    Args:
        qubits: number of quantum bits in the circuit.
        classical_bits: number of classical bits receiving measurements.

    Returns:
        Histogram of measured bitstrings over 1000 simulator shots.
    """
    simulator = qiskit.Aer.get_backend("""aer_simulator""" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    # NOTE(review): ``single_qubit_measure`` is not defined in this module (the
    # function above is named ``UpperCamelCase``), so running this script
    # raises NameError — confirm the intended function name.
    print(f"""Total count for various states are: {single_qubit_measure(1, 1)}""") | 34 |
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def UpperCamelCase( lowercase_ = "" ) -> dict[str, float]:
    """Scrape the IMDb Top-250 chart and return ``{movie title: IMDb rating}``.

    Args:
        lowercase_: chart URL; falls back to the Top-250 page when empty.

    Returns:
        Mapping of each movie title to its rating as a float.
    """
    url = lowercase_ or """https://www.imdb.com/chart/top/?ref_=nv_mv_250"""
    soup = BeautifulSoup(requests.get(url).text , """html.parser""" )
    # Title cells and rating cells appear in the same order, so zip pairs them.
    titles = soup.find_all("""td""" , attrs="""titleColumn""" )
    ratings = soup.find_all("""td""" , class_="""ratingColumn imdbRating""" )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(titles, ratings)
    }
def UpperCamelCase( lowercase_ = "IMDb_Top_250_Movies.csv" ) -> None:
    # Writes the Top-250 titles and ratings to a CSV file named ``lowercase_``.
    '''simple docstring'''
    # NOTE(review): ``get_imdb_top_aaa_movies`` is not defined in this module —
    # the scraper above is also named ``UpperCamelCase`` and is shadowed by this
    # definition, so this call raises NameError at runtime. Also note that the
    # result is bound to ``snake_case_`` while the loop below reads ``movies``.
    snake_case_ = get_imdb_top_aaa_movies()
    with open(lowercase_ , """w""" , newline="""""" ) as out_file:
        # NOTE(review): ``csv.writer`` is handed the *filename* string instead
        # of the open ``out_file`` handle, and the writer object is bound to
        # ``snake_case_`` while ``writer`` is read below — both look mangled.
        snake_case_ = csv.writer(lowercase_ )
        writer.writerow(["""Movie title""", """IMDb rating"""] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )
# NOTE(review): ``write_movies`` is not defined under that name in this module.
if __name__ == "__main__":
    write_movies() | 34 | 1 |
from __future__ import annotations
# Module metadata.
# NOTE(review): these were presumably the ``__author__`` / ``__license__`` /
# ``__version__`` / ``__maintainer__`` / ``__email__`` / ``__status__`` dunders
# before name mangling; as written they all rebind the single name
# ``lowercase``, so only the last value survives.
lowercase : Tuple = """Muhammad Umer Farooq"""
lowercase : List[str] = """MIT"""
lowercase : Any = """1.0.0"""
lowercase : str = """Muhammad Umer Farooq"""
lowercase : List[str] = """contact@muhammadumerfarooq.me"""
lowercase : Union[str, Any] = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class __snake_case ( HTMLParser ):
    """HTML parser that collects every anchor ``href``, resolved against a base URL.

    Subclasses ``HTMLParser`` (the obfuscated original inherited the undefined
    ``lowerCAmelCase``) and overrides ``handle_starttag`` — the callback must
    carry exactly that name or ``feed()`` never populates ``self.urls``.
    """

    def __init__(self, domain):
        super().__init__()
        # Absolute URLs found so far, in document order, without duplicates.
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag, attrs):
        """Record the href of each ``<a>`` tag, skipping empty and '#' anchors."""
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> str:
    """Return the registrable domain (last two host labels) of a URL.

    The original delegated to ``get_sub_domain_name``, which is not defined
    under that name in this module; the netloc extraction is inlined instead.
    """
    return ".".join(parse.urlparse(SCREAMING_SNAKE_CASE__).netloc.split(""".""" )[-2:] )
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> str:
    """Return the network-location (``host[:port]``) component of the given URL."""
    parsed = parse.urlparse(SCREAMING_SNAKE_CASE__)
    return parsed.netloc
def _snake_case( SCREAMING_SNAKE_CASE__ = "https://github.com" ) -> list[str]:
    """Crawl the links on the given page and return a sorted list of e-mails
    found on the linked pages, restricted to the page's registrable domain.

    NOTE(review): the domain helper and the parser class are shadowed/mangled
    in this module, so the domain is computed inline and the parser is
    referenced by its actual class name ``__snake_case``.
    """
    domain = ".".join(parse.urlparse(SCREAMING_SNAKE_CASE__).netloc.split(""".""" )[-2:] )
    # Initialize the parser
    parser = __snake_case(SCREAMING_SNAKE_CASE__)
    try:
        # Open URL
        r = requests.get(SCREAMING_SNAKE_CASE__ )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    # The crawler above is bound to ``_snake_case`` (the last definition of
    # that name wins); ``emails_from_url`` does not exist in this module.
    emails = _snake_case("""https://github.com""")
    print(F'''{len(emails)} emails found:''')
    print("""\n""".join(sorted(emails)))
| 20 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowercase : Any = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS )
class __snake_case ( Pipeline ):
    """Image-classification pipeline: assigns class labels (with scores) to images.

    The four hook methods must carry the names the ``Pipeline`` base class
    dispatches to (``_sanitize_parameters`` / ``preprocess`` / ``_forward`` /
    ``postprocess``); the obfuscated original gave all four the same mangled
    name, and also declared duplicate ``*args``/``**kwargs`` parameter names.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, """vision""" )
        # Restrict loadable models to image-classification heads for the
        # framework in use.
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )

    def _sanitize_parameters(self, top_k=None):
        """Split call-time kwargs into (preprocess, forward, postprocess) params."""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        """Classify image(s); returns a list of ``{"score", "label"}`` dicts."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        # Accepts a path/URL/PIL image; normalized to model-ready tensors.
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """Turn logits into the top-k ``{"score", "label"}`` predictions."""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}" )
        scores = scores.tolist()
        ids = ids.tolist()
        # ``id2label`` (the original had the digit mangled to ``idalabel``)
        # maps class indices to human-readable label strings.
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 20 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase ):
    """Holds the configuration used to exercise ``MobileViTImageProcessor``.

    Renamed from the mangled ``__UpperCAmelCase`` — the test class below
    instantiates ``MobileViTImageProcessingTester(self)``, and the old name
    was shadowed by the second class definition anyway. The obfuscated
    ``__init__`` also declared every parameter with the same name (a
    SyntaxError) and never stored the values on ``self``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"""shortest_edge""": 20}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase ):
    """Exercises ``MobileViTImageProcessor``: attribute presence, dict round-trip,
    and ``__call__`` on PIL / NumPy / PyTorch inputs.

    Method names restored to ``setUp`` / ``test_*`` so unittest actually runs
    them; the mixin base replaces the undefined mangled ``_UpperCAmelCase``.
    """

    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """do_resize""" ) )
        self.assertTrue(hasattr(image_processing, """size""" ) )
        self.assertTrue(hasattr(image_processing, """do_center_crop""" ) )
        self.assertTrue(hasattr(image_processing, """center_crop""" ) )
        self.assertTrue(hasattr(image_processing, """do_flip_channel_order""" ) )

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 20} )
        self.assertEqual(image_processor.crop_size, {"""height""": 18, """width""": 18} )
        # Integer kwargs must override the dict-valued defaults.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size, {"""height""": 84, """width""": 84} )

    def test_batch_feature(self):
        # Covered by the per-input-type tests below.
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )
| 351 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    """Generates tiny random configs/inputs and checks the three TF DPR heads.

    Renamed from the mangled ``__UpperCAmelCase`` because the test class below
    instantiates ``TFDPRModelTester(self)``; the obfuscated ``__init__`` also
    declared every parameter with the same name (a SyntaxError) and never
    stored the values on ``self``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        """Build a small DPRConfig plus random ids/masks/labels for one batch."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        # DPR wraps a BERT config and adds the projection dimension.
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    """Common-model-test harness for the three TF DPR heads.

    Bases restored to the mixins imported above (the mangled original
    inherited the undefined ``_UpperCAmelCase``), and the ``setUp`` /
    ``test_*`` method names restored so unittest discovers them.
    """

    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
    # Features DPR does not support under the common tests.
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        # The source repeated the context-encoder loop; preserved as-is.
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase ):
    """Slow integration check: pretrained question-encoder embeddings match a
    recorded slice. Renamed so it no longer shadows the class above.
    """

    @slow
    def test_embeddings(self):
        model = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""" )
        input_ids = tf.constant(
            [[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]] )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03_23_62_53,
                    0.12_75_33_35,
                    0.16_81_85_09,
                    0.00_27_97_86,
                    0.3_89_69_33,
                    0.24_26_49_45,
                    0.2_17_89_71,
                    -0.02_33_52_27,
                    -0.08_48_19_59,
                    -0.14_32_41_17,
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 125 | 0 |
"""simple docstring"""
def _snake_case ( lowercase__ ):
    """Return True when the list forms an arithmetic series (constant difference).

    Raises:
        ValueError: if the input is not a list, or is an empty list.

    Fixes: ``isinstance(x, x)`` raised TypeError for any list input (the second
    argument must be a type), and the body referenced an undefined ``series``.
    """
    if not isinstance(lowercase__, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(lowercase__) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(lowercase__) == 1:
        # A single element is trivially arithmetic.
        return True
    common_diff = lowercase__[1] - lowercase__[0]
    for index in range(len(lowercase__) - 1):
        if lowercase__[index + 1] - lowercase__[index] != common_diff:
            return False
    return True
def _snake_case ( lowercase__ ):
    """Return the arithmetic mean of a non-empty list of numbers.

    Raises:
        ValueError: if the input is not a list, or is an empty list.

    Fixes: ``isinstance(x, x)`` raised TypeError, and the accumulation loop
    referenced undefined names; the loop is replaced by ``sum()``.
    NOTE(review): this definition shares the name ``_snake_case`` with the
    series checker above and shadows it — distinct names should be restored.
    """
    if not isinstance(lowercase__, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(lowercase__) == 0:
        raise ValueError('Input list must be a non empty list')
    return sum(lowercase__) / len(lowercase__)
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    from doctest import testmod

    testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase :int = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 0 |
from __future__ import annotations
from math import gcd
def __lowerCamelCase ( num: int , seed: int = 2 , step: int = 1 , attempts: int = 3 , ):
    """Return a nontrivial factor of ``num`` using Pollard's rho, or None.

    Args:
        num: value to factor (must be >= 2).
        seed: initial value for both the tortoise and the hare.
        step: additive constant of the pseudorandom map ``f(x) = (x**2 + step) % num``.
        attempts: number of (seed, step) retries before giving up.

    Fix: every parameter (and every ``rand_fn`` parameter) was named
    ``__magic_name__``, a duplicate-argument SyntaxError; distinct names are
    restored from the variables the body actually uses.
    """
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard suggested ``f(x) = (x**2 - 1) % num``; we use ``x**2 + C`` where
    # ``C`` (``step``) can be changed between attempts, making retries easy.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # Once both are inside a cycle whose length divides ``num``, their
            # position difference shares a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            if divisor == num:
                # The divisor is ``num`` itself and is useless; retry.
                break
            # The divisor is a nontrivial factor of ``num``!
            return divisor

        # This attempt failed: reseed from the hare's position (as in Brent's
        # optimized variant) and bump the step, keeping the search deterministic.
        seed = hare
        step += 1

    # We were unlucky or ``num`` itself is actually prime.
    return None
if __name__ == "__main__":
import argparse
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
__UpperCAmelCase = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 42 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
__UpperCAmelCase = random.Random()
def __lowerCamelCase ( shape , scale=1.0 , rng=None , name=None ):
    """Create a ``shape[0] x shape[1]`` nested list of random floats in [0, scale).

    Fix: all four parameters shared one name (SyntaxError) and the fallback rng
    and the accumulator were bound to throwaway names; restored to the names the
    body reads (``rng``, ``values``). ``name`` is accepted but unused, matching
    the original signature.
    """
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
    """Hyper-parameter holder and input builder for the SpeechT5 feature-extractor tests.

    Fixes: the class and its methods were renamed by obfuscation while the test
    class below still instantiates ``SpeechTaFeatureExtractionTester`` and calls
    ``prepare_feat_extract_dict`` / ``prepare_inputs_for_target``; every method
    signature had duplicate parameter names (SyntaxError); and the constructor
    stored its arguments in locals instead of on ``self``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16_000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Length step between consecutive batch items so they span min..max.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        """Kwargs for constructing the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Waveform inputs: an equal-length batch, or lengths increasing by seq_length_diff."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        """Spectrogram-style targets of shape (length, num_mel_bins)."""
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
class lowerCamelCase__ ( _a , unittest.TestCase ):
    # Test suite for SpeechTaFeatureExtractor: waveform inputs (input_values),
    # mel-spectrogram targets (audio_target), padding/truncation, attention
    # masks, and slow integration checks against reference tensors.
    # NOTE(review): every test method below was renamed to the same identifier
    # `_lowerCamelCase`, so each later definition shadows the previous one and
    # only the last survives — the original distinct `test_*` names should be
    # restored. The base `_a` is presumably SequenceFeatureExtractionTestMixin
    # (imported at the top of the file) — TODO confirm.
    _lowerCAmelCase = SpeechTaFeatureExtractor
    def _lowerCamelCase ( self : Optional[Any] ):
        # setUp: shared fixture holding sizes and input builders.
        a__: List[str] =SpeechTaFeatureExtractionTester(self )
    def _lowerCamelCase ( self : Optional[int] , _a : Union[str, Any] ):
        # Assert per-feature zero mean and unit variance (post-normalization).
        self.assertTrue(np.all(np.mean(_a , axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(_a , axis=0 ) - 1 ) < 1e-3 ) )
    def _lowerCamelCase ( self : Union[str, Any] ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        a__: Union[str, Any] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        a__: Union[str, Any] =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        a__: Union[str, Any] =[np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        a__: Union[str, Any] =feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
        a__: List[Any] =feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
        self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
        # Test batched
        a__: Tuple =feat_extract(_a , return_tensors="np" ).input_values
        a__: int =feat_extract(_a , return_tensors="np" ).input_values
        for enc_seq_a, enc_seq_a in zip(_a , _a ):
            self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
    def _lowerCamelCase ( self : int ):
        # Zero-mean/unit-variance normalization across padding strategies
        # ("longest", "max_length", "do_not_pad") for list inputs.
        a__: Optional[int] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        a__: Union[str, Any] =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        a__: Tuple =["longest", "max_length", "do_not_pad"]
        a__: List[str] =[None, 1_6_0_0, None]
        for max_length, padding in zip(_a , _a ):
            a__: Optional[int] =feat_extract(_a , padding=_a , max_length=_a , return_tensors="np" )
            a__: Optional[int] =processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
            # Padded region beyond the real signal should be (near) zero.
            self.assertTrue(input_values[0][8_0_0:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
            self.assertTrue(input_values[0][1_0_0_0:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
    def _lowerCamelCase ( self : Any ):
        # Same normalization check, driven from a range of lengths.
        a__: List[Any] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        a__: Tuple =range(8_0_0 , 1_4_0_0 , 2_0_0 )
        a__: List[Any] =[floats_list((1, x) )[0] for x in lengths]
        a__: Optional[int] =["longest", "max_length", "do_not_pad"]
        a__: Optional[Any] =[None, 1_6_0_0, None]
        for max_length, padding in zip(_a , _a ):
            a__: List[Any] =feat_extract(_a , max_length=_a , padding=_a )
            a__: Optional[Any] =processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
            self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
            self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
    def _lowerCamelCase ( self : str ):
        # Truncation to max_length with padding="max_length".
        a__: List[str] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        a__: str =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        a__: int =feat_extract(
            _a , truncation=_a , max_length=1_0_0_0 , padding="max_length" , return_tensors="np" )
        a__: Any =processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )
    def _lowerCamelCase ( self : Optional[int] ):
        # padding="longest": output is padded to the longest kept sequence,
        # whether max_length is below (1000) or above (2000) the longest input.
        a__: Optional[Any] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        a__: Dict =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        a__: int =feat_extract(
            _a , truncation=_a , max_length=1_0_0_0 , padding="longest" , return_tensors="np" )
        a__: int =processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1_0_0_0) )
        a__: int =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        a__: Tuple =feat_extract(
            _a , truncation=_a , max_length=2_0_0_0 , padding="longest" , return_tensors="np" )
        a__: Optional[int] =processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1_2_0_0) )
    def _lowerCamelCase ( self : Any ):
        # float64 python input should come back float32 from pad() for np and pt.
        a__: List[Any] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        a__: Dict =np.random.rand(1_0_0 ).astype(np.floataa )
        a__: str =np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            a__: int =feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            a__: int =feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    def _lowerCamelCase ( self : Any ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        # (target/spectrogram path, via audio_target=...).
        a__: int =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        a__: Optional[int] =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        a__: Any =[np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test feature size
        a__: Dict =feature_extractor(audio_target=_a , padding=_a , return_tensors="np" ).input_values
        self.assertTrue(input_values.ndim == 3 )
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
        # Test not batched input
        a__: Tuple =feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
        a__: str =feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
        self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
        # Test batched
        a__: List[Any] =feature_extractor(_a , return_tensors="np" ).input_values
        a__: Union[str, Any] =feature_extractor(_a , return_tensors="np" ).input_values
        for enc_seq_a, enc_seq_a in zip(_a , _a ):
            self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        a__: Optional[int] =[floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        a__: str =np.asarray(_a )
        a__: int =feature_extractor(_a , return_tensors="np" ).input_values
        a__: int =feature_extractor(_a , return_tensors="np" ).input_values
        for enc_seq_a, enc_seq_a in zip(_a , _a ):
            self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
    def _lowerCamelCase ( self : str ):
        # BatchFeature keeps target inputs intact and batches to np tensors.
        a__: str =self.feat_extract_tester.prepare_inputs_for_target()
        a__: Dict =self.feature_extraction_class(**self.feat_extract_dict )
        a__: int =feat_extract.model_input_names[0]
        a__: Optional[Any] =BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(_a ) == len(_a ) for x, y in zip(_a , processed_features[input_name] ) ) )
        a__: Any =self.feat_extract_tester.prepare_inputs_for_target(equal_length=_a )
        a__: List[str] =BatchFeature({input_name: speech_inputs} , tensor_type="np" )
        a__: List[Any] =processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            a__: Tuple =batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
    @require_torch
    def _lowerCamelCase ( self : Optional[Any] ):
        # Same as above but batching to torch tensors.
        a__: Tuple =self.feat_extract_tester.prepare_inputs_for_target(equal_length=_a )
        a__: int =self.feature_extraction_class(**self.feat_extract_dict )
        a__: Tuple =feat_extract.model_input_names[0]
        a__: Optional[int] =BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
        a__: Optional[int] =processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            a__: Optional[Any] =batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
    @require_torch
    def _lowerCamelCase ( self : str ):
        # pad() must produce numerically identical results for np and pt tensors.
        a__: List[Any] =self.feature_extraction_class(**self.feat_extract_dict )
        a__: Tuple =self.feat_extract_tester.prepare_inputs_for_target()
        a__: Optional[int] =feat_extract.model_input_names[0]
        a__: Optional[int] =BatchFeature({input_name: speech_inputs} )
        a__: Tuple =feat_extract.num_mel_bins # hack!
        a__: int =feat_extract.pad(_a , padding="longest" , return_tensors="np" )[input_name]
        a__: Dict =feat_extract.pad(_a , padding="longest" , return_tensors="pt" )[input_name]
        self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
    def _lowerCamelCase ( self : List[str] ):
        # attention_mask shape matches the padded batch and its row sums equal
        # the true (unpadded) sequence lengths.
        a__: Dict =self.feat_extract_dict
        a__: Optional[Any] =True
        a__: Any =self.feature_extraction_class(**_a )
        a__: List[str] =self.feat_extract_tester.prepare_inputs_for_target()
        a__: Optional[Any] =[len(_a ) for x in speech_inputs]
        a__: Any =feat_extract.model_input_names[0]
        a__: Union[str, Any] =BatchFeature({input_name: speech_inputs} )
        a__: List[Any] =feat_extract.num_mel_bins # hack!
        a__: Any =feat_extract.pad(_a , padding="longest" , return_tensors="np" )
        self.assertIn("attention_mask" , _a )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _a )
    def _lowerCamelCase ( self : Any ):
        # attention_mask when truncating to the shortest sequence length.
        a__: Tuple =self.feat_extract_dict
        a__: Dict =True
        a__: Tuple =self.feature_extraction_class(**_a )
        a__: str =self.feat_extract_tester.prepare_inputs_for_target()
        a__: List[str] =[len(_a ) for x in speech_inputs]
        a__: Optional[int] =feat_extract.model_input_names[0]
        a__: Optional[Any] =BatchFeature({input_name: speech_inputs} )
        a__: List[str] =min(_a )
        a__: Optional[int] =feat_extract.num_mel_bins # hack!
        a__: Optional[Any] =feat_extract.pad(
            _a , padding="max_length" , max_length=_a , truncation=_a , return_tensors="np" )
        self.assertIn("attention_mask" , _a )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _lowerCamelCase ( self : List[str] , _a : List[str] ):
        # Helper: load the first `num_samples` LibriSpeech audio arrays.
        from datasets import load_dataset
        a__: List[Any] =load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        a__: Union[str, Any] =ds.sort("id" ).select(range(_a ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def _lowerCamelCase ( self : Tuple ):
        # Slow integration test: waveform path vs. reference values.
        # fmt: off
        a__: Optional[Any] =torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] )
        # fmt: on
        a__: Tuple =self._load_datasamples(1 )
        a__: Tuple =SpeechTaFeatureExtractor()
        a__: Any =feature_extractor(_a , return_tensors="pt" ).input_values
        self.assertEquals(input_values.shape , (1, 9_3_6_8_0) )
        self.assertTrue(torch.allclose(input_values[0, :3_0] , _a , atol=1e-6 ) )
    def _lowerCamelCase ( self : int ):
        # Slow integration test: spectrogram-target path vs. reference values.
        # fmt: off
        a__: List[Any] =torch.tensor(
            [-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7,
             -3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6,
             -3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1,
             -3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] )
        # fmt: on
        a__: List[Any] =self._load_datasamples(1 )
        a__: List[str] =SpeechTaFeatureExtractor()
        a__: Any =feature_extractor(audio_target=_a , return_tensors="pt" ).input_values
        self.assertEquals(input_values.shape , (1, 3_6_6, 8_0) )
        self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , _a , atol=1e-4 ) )
| 42 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: both constants were assigned to the same obfuscated name, so the second
# assignment clobbered the logger. Distinct canonical names restored.
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class lowerCamelCase__ ( PretrainedConfig ):
    """Configuration class for GPT-NeoX models (e.g. EleutherAI/gpt-neox-20b).

    Fixes: the base class was the undefined name ``A`` (the file imports
    ``PretrainedConfig`` for this purpose); every ``__init__`` parameter shared
    one name (SyntaxError); ``super().__init__`` forwarded undefined names; and
    the validator was defined under an obfuscated name although ``__init__``
    calls ``self._rope_scaling_validation()``.
    """

    __a = """gpt_neox"""

    def __init__(
        self,
        vocab_size=50_432,
        hidden_size=6_144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24_576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10_000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                """The hidden size is not divisble by the number of attention heads! Make sure to update them!""" )

    def _rope_scaling_validation(self):
        """Validate ``rope_scaling``: None, or a 2-key dict with type in
        {"linear", "dynamic"} and a float factor > 1."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
                f'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get("""type""", None)
        rope_scaling_factor = self.rope_scaling.get("""factor""", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
| 115 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
# Matches English articles as standalone words; used by the answer normalizer
# (referenced below as ARTICLES_REGEX — the obfuscated name broke that lookup).
ARTICLES_REGEX = re.compile(R"\b(a|an|the)\b", re.UNICODE)
# Parsed CLI options; populated by the argument parser in main().
# NOTE(review): name restored from the canonical SQuAD eval script — confirm callers.
OPTS = None
def parse_args():
    """Parse command-line options for the SQuAD v2.0 evaluation script.

    Fixes: ``--na-prob-thresh`` used an arbitrary object as ``type`` instead of
    ``float``, and ``--out-image-dir`` had a non-None sentinel default; name
    restored to the canonical ``parse_args``.
    """
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout).")
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer.")
    parser.add_argument(
        "--na-prob-thresh", "-t", type=float, default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).')
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory.")
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        # No arguments at all: show usage and exit with an error status.
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    """Map each question id to whether it has at least one gold answer.

    Fix: the per-question flag was assigned to a throwaway variable instead of
    being stored under ``qid_to_has_ans[qa["id"]]``, and the returned name was
    never defined.
    """
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(_snake_case):
    """Lower-case, strip punctuation, articles and extra whitespace.

    Fixes: inner helpers referenced ``text`` while their parameter had a
    different name; definition renamed to ``normalize_answer``, the name other
    functions in this file call.
    """
    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(_snake_case))))
def get_tokens(_snake_case):
    """Whitespace-tokenize a normalized answer; empty/None input yields [].

    Fix: renamed to ``get_tokens``, the name the F1 computation below calls.
    """
    if not _snake_case:
        return []
    return normalize_answer(_snake_case).split()
def compute_exact(a_gold, a_pred):
    """Return 1 if normalized gold and prediction match exactly, else 0.

    Fix: both parameters shared one name (SyntaxError); distinct names restored.
    """
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_fa(a_gold, a_pred):
    """Token-level F1 between a gold answer and a prediction.

    Fix: both parameters shared one name (SyntaxError), so both ``get_tokens``
    calls received the same argument and every local was unbound.
    """
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def get_raw_scores(dataset, preds):
    """Compute raw exact-match and F1 scores per question id.

    Fixes: duplicate parameter names (SyntaxError); per-qid scores were assigned
    to throwaway variables instead of the two result dicts; the gold-answer
    filter normalized the wrong variable; the return referenced unbound names.
    """
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                # Keep only gold answers that survive normalization.
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(F'''Missing prediction for {qid}''')
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Zero out (or credit) scores for questions predicted as unanswerable.

    A question whose no-answer probability exceeds the threshold scores 1.0
    only when it truly has no answer, otherwise 0.0. Fixes: duplicate parameter
    names (SyntaxError) and results assigned to locals instead of the dict.
    """
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def lowerCAmelCase_ ( _snake_case : List[Any] , _snake_case : List[str] , _snake_case : Tuple=None ) -> Tuple:
'''simple docstring'''
if not qid_list:
__magic_name__ : Any = len(_snake_case )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values() ) / total),
("f1", 100.0 * sum(fa_scores.values() ) / total),
("total", total),
] )
else:
__magic_name__ : Tuple = len(_snake_case )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("total", total),
] )
def merge_eval(main_eval, new_eval, prefix):
    """Copy every entry of ``new_eval`` into ``main_eval`` under ``prefix_key``.

    Fix: the store into ``main_eval`` was flattened into a throwaway assignment,
    so the merge was a no-op; also renamed to the name callers below use.
    """
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    """Render a precision-recall step curve to ``out_image`` via matplotlib.

    Fix: all four parameters shared one name (SyntaxError); restored the
    canonical (precisions, recalls, out_image, title) order used by the caller.
    NOTE(review): ``plt`` must be imported elsewhere in this module — confirm.
    """
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Sweep no-answer thresholds and return average precision as {"ap": pct}.

    Questions are visited in increasing no-answer probability; at each point
    where a threshold could be placed, the area under the PR curve is extended.
    Fixes: duplicate parameter names (SyntaxError); the sort lambda referenced
    an undefined ``k``; every accumulator was assigned to throwaway names.
    """
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Compute PR curves for exact/F1/oracle scores and merge APs into main_eval.

    Fixes: duplicate parameter names (SyntaxError); the three PR results and the
    oracle score dict were assigned to throwaway names; the oracle comprehension
    converted the wrong variable.
    """
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        # No answerable questions: PR analysis is undefined.
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score", )
    pr_fa = make_precision_recall_eval(
        fa_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score", )
    # Oracle: score 1.0 exactly when the question is answerable.
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)", )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def lowerCAmelCase_ ( _snake_case : int , _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Optional[Any] ) -> Dict:
    """Plot a histogram of the model's no-answer probabilities for a qid set.

    NOTE(review): machine-mangled — the parameters all share one name
    (SyntaxError) and the body reads unbound names (``qid_list``,
    ``na_probs``, ``name``); kept byte-identical.
    """
    if not qid_list:
        return
    __magic_name__ : Dict = [na_probs[k] for k in qid_list]
    # Weight each sample by 1/N so bars show proportions rather than counts.
    __magic_name__ : str = np.ones_like(_snake_case ) / float(len(_snake_case ) )
    plt.hist(_snake_case , weights=_snake_case , bins=20 , range=(0.0, 1.0) )
    plt.xlabel("Model probability of no-answer" )
    plt.ylabel("Proportion of dataset" )
    plt.title(F'''Histogram of no-answer probability: {name}''' )
    plt.savefig(os.path.join(_snake_case , F'''na_prob_hist_{name}.png''' ) )
    # Clear the current figure so subsequent plots start fresh.
    plt.clf()
def lowerCAmelCase_ ( _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : List[str] , _snake_case : Dict ) -> List[Any]:
    """Sweep no-answer probability thresholds; return (best score %, threshold).

    Starts from the baseline of predicting "no answer" for everything and
    walks the questions in order of increasing no-answer probability,
    tracking the running score and the threshold that maximizes it.

    NOTE(review): machine-mangled — duplicate parameter names (SyntaxError),
    the sort key lambda reads an unbound ``k``, and results are assigned to
    ``__magic_name__`` while the intended names (``num_no_ans``,
    ``cur_score``, ``best_score``, ``diff``, ``best_thresh``) are read
    unbound; kept byte-identical.
    """
    # Baseline: every question answered with the empty string scores one
    # point per genuinely unanswerable question.
    __magic_name__ : Union[str, Any] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
    __magic_name__ : List[str] = num_no_ans
    __magic_name__ : Dict = cur_score
    __magic_name__ : Dict = 0.0
    # Visit questions from most to least confident predictions.
    __magic_name__ : Any = sorted(_snake_case , key=lambda _snake_case : na_probs[k] )
    for i, qid in enumerate(_snake_case ):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            # Keeping this prediction gains its (exact/F1) score ...
            __magic_name__ : Union[str, Any] = scores[qid]
        else:
            # ... while keeping a non-empty prediction on a no-answer
            # question loses the point the baseline had earned.
            if preds[qid]:
                __magic_name__ : List[Any] = -1
            else:
                __magic_name__ : Optional[int] = 0
        cur_score += diff
        if cur_score > best_score:
            # Record the best running score and the threshold achieving it.
            __magic_name__ : Optional[int] = cur_score
            __magic_name__ : List[Any] = na_probs[qid]
    return 100.0 * best_score / len(_snake_case ), best_thresh
def lowerCAmelCase_ ( _snake_case : int , _snake_case : str , _snake_case : List[str] , _snake_case : Tuple , _snake_case : List[Any] , _snake_case : Dict ) -> Optional[Any]:
    """Find the best no-answer thresholds for exact match and F1 and record
    them (plus the corresponding best scores) in the main evaluation dict.

    NOTE(review): machine-mangled — duplicate parameter names (SyntaxError)
    and the trailing assignments read unbound names (``best_exact``,
    ``exact_thresh``, ``best_fa``, ``fa_thresh``); kept byte-identical.
    """
    # One sweep per metric (exact match, then F1).
    __magic_name__ , __magic_name__ : List[str] = find_best_thresh(_snake_case , _snake_case , _snake_case , _snake_case )
    __magic_name__ , __magic_name__ : int = find_best_thresh(_snake_case , _snake_case , _snake_case , _snake_case )
    # Store best scores/thresholds into the main evaluation dict.
    __magic_name__ : Optional[int] = best_exact
    __magic_name__ : List[Any] = exact_thresh
    __magic_name__ : Dict = best_fa
    __magic_name__ : Any = fa_thresh
def lowerCAmelCase_ ( ) -> int:
    """Top-level evaluation driver.

    Loads the reference dataset, the predictions, and (optionally) the
    no-answer probabilities, computes thresholded exact-match/F1 metrics
    overall and per has-answer/no-answer subset, optionally runs the
    threshold search and diagnostic plots, and writes the result as JSON.

    NOTE(review): machine-mangled — every result is bound to
    ``__magic_name__`` while later lines read the intended names
    (``dataset_json``, ``preds``, ``qid_to_has_ans`` …); kept byte-identical.
    """
    # Load the reference dataset and the model predictions.
    with open(OPTS.data_file ) as f:
        __magic_name__ : Optional[Any] = json.load(_snake_case )
    __magic_name__ : List[Any] = dataset_json["data"]
    with open(OPTS.pred_file ) as f:
        __magic_name__ : Optional[Any] = json.load(_snake_case )
    # No-answer probabilities default to 0.0 when no file is supplied.
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file ) as f:
            __magic_name__ : Any = json.load(_snake_case )
    else:
        __magic_name__ : Any = {k: 0.0 for k in preds}
    __magic_name__ : str = make_qid_to_has_ans(_snake_case )  # maps qid to True/False
    __magic_name__ : Tuple = [k for k, v in qid_to_has_ans.items() if v]
    __magic_name__ : Optional[Any] = [k for k, v in qid_to_has_ans.items() if not v]
    # Raw per-question scores, then thresholded by no-answer probability.
    __magic_name__ , __magic_name__ : Union[str, Any] = get_raw_scores(_snake_case , _snake_case )
    __magic_name__ : Optional[Any] = apply_no_ans_threshold(_snake_case , _snake_case , _snake_case , OPTS.na_prob_thresh )
    __magic_name__ : Optional[Any] = apply_no_ans_threshold(_snake_case , _snake_case , _snake_case , OPTS.na_prob_thresh )
    __magic_name__ : List[Any] = make_eval_dict(_snake_case , _snake_case )
    # Break the metrics down by has-answer / no-answer subsets.
    if has_ans_qids:
        __magic_name__ : int = make_eval_dict(_snake_case , _snake_case , qid_list=_snake_case )
        merge_eval(_snake_case , _snake_case , "HasAns" )
    if no_ans_qids:
        __magic_name__ : List[Any] = make_eval_dict(_snake_case , _snake_case , qid_list=_snake_case )
        merge_eval(_snake_case , _snake_case , "NoAns" )
    # Optional threshold search and diagnostic plots.
    if OPTS.na_prob_file:
        find_all_best_thresh(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , OPTS.out_image_dir )
        histogram_na_prob(_snake_case , _snake_case , OPTS.out_image_dir , "hasAns" )
        histogram_na_prob(_snake_case , _snake_case , OPTS.out_image_dir , "noAns" )
    # Emit results to a file when requested, otherwise to stdout.
    if OPTS.out_file:
        with open(OPTS.out_file , "w" ) as f:
            json.dump(_snake_case , _snake_case )
    else:
        print(json.dumps(_snake_case , indent=2 ) )
if __name__ == "__main__":
    # NOTE(review): the parsed options are bound to ``snake_case`` but read
    # everywhere as ``OPTS`` — machine-mangled; kept byte-identical.
    snake_case : int = parse_args()
    if OPTS.out_image_dir:
        # Select a non-interactive backend before importing pyplot so the
        # curves/histograms can be rendered without a display.
        import matplotlib
        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
| 281 | 0 |
def _UpperCamelCase ( snake_case__ ) -> bool:
__UpperCAmelCase : Any = 0
for ch in input_str:
__UpperCAmelCase : Optional[Any] = ord(snake_case__ )
__UpperCAmelCase : Union[str, Any] = pow(2, snake_case__ )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366 | import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
    """Builds ConvNextV2 configurations and dummy inputs for the model tests.

    NOTE(review): machine-mangled — ``__init__`` repeats the parameter name
    ``__lowerCamelCase`` (duplicate argument names are a SyntaxError) and
    method bodies bind results to ``__UpperCAmelCase`` while later lines
    read the intended names; kept byte-identical pending restoration.
    """
    def __init__( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase: int=[2, 2, 3, 2] , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Tuple=37 , __lowerCamelCase: Tuple="gelu" , __lowerCamelCase: List[Any]=10 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase: Optional[int]=[2, 3, 4] , __lowerCamelCase: int=None , ) -> List[str]:
        # Record every hyper-parameter of the dummy model on the tester.
        __UpperCAmelCase : Union[str, Any] = parent
        __UpperCAmelCase : List[str] = batch_size
        __UpperCAmelCase : Optional[int] = image_size
        __UpperCAmelCase : List[str] = num_channels
        __UpperCAmelCase : Union[str, Any] = num_stages
        __UpperCAmelCase : List[str] = hidden_sizes
        __UpperCAmelCase : Any = depths
        __UpperCAmelCase : Optional[int] = is_training
        __UpperCAmelCase : List[Any] = use_labels
        __UpperCAmelCase : Optional[int] = intermediate_size
        __UpperCAmelCase : Optional[Any] = hidden_act
        __UpperCAmelCase : Union[str, Any] = num_labels
        __UpperCAmelCase : Any = initializer_range
        __UpperCAmelCase : List[str] = out_features
        __UpperCAmelCase : Tuple = out_indices
        __UpperCAmelCase : List[Any] = scope
    def _lowerCamelCase ( self: List[Any] ) -> Optional[int]:
        """Create random pixel values (and labels when enabled) plus a config."""
        __UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __UpperCAmelCase : List[str] = None
        if self.use_labels:
            __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
        __UpperCAmelCase : Optional[Any] = self.get_config()
        return config, pixel_values, labels
    def _lowerCamelCase ( self: Tuple ) -> List[Any]:
        """Build a ConvNextVaConfig from the stored hyper-parameters."""
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int] ) -> Union[str, Any]:
        """Run the base model and check the last hidden state shape."""
        __UpperCAmelCase : Optional[Any] = ConvNextVaModel(config=__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        __UpperCAmelCase : List[str] = model(__lowerCamelCase )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> Tuple:
        """Run the classification head and check the logits shape."""
        __UpperCAmelCase : Union[str, Any] = ConvNextVaForImageClassification(__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        __UpperCAmelCase : Optional[int] = model(__lowerCamelCase , labels=__lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _lowerCamelCase ( self: int , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] ) -> Optional[int]:
        """Check the backbone's feature maps/channels with and without out_features."""
        __UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        __UpperCAmelCase : Any = model(__lowerCamelCase )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        __UpperCAmelCase : List[Any] = None
        __UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
        model.to(__lowerCamelCase )
        model.eval()
        __UpperCAmelCase : Any = model(__lowerCamelCase )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def _lowerCamelCase ( self: int ) -> List[str]:
        """Return (config, inputs_dict) for the shared test machinery."""
        __UpperCAmelCase : int = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
        __UpperCAmelCase : str = {"pixel_values": pixel_values}
        return config, inputs_dict
    def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
        """Same as above but the inputs dict also carries labels."""
        __UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
        __UpperCAmelCase : Dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
    """Model/pipeline test suite for ConvNextV2.

    NOTE(review): machine-mangled — the class lists ``_lowercase`` twice as
    a base (``TypeError: duplicate base class`` at class creation) and
    method bodies bind to ``__UpperCAmelCase`` while reading the intended
    names; kept byte-identical pending restoration.
    """
    # Model classes / pipeline mapping exercised by the common tests.
    lowerCamelCase__: Dict = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    lowerCamelCase__: str = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    # Feature flags consumed by the shared ModelTesterMixin machinery.
    lowerCamelCase__: Tuple = False
    lowerCamelCase__: int = False
    lowerCamelCase__: Dict = False
    lowerCamelCase__: int = False
    lowerCamelCase__: Any = False
    def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]:
        """Instantiate the model tester and the config tester."""
        __UpperCAmelCase : Union[str, Any] = ConvNextVaModelTester(self )
        __UpperCAmelCase : str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
    def _lowerCamelCase ( self: Dict ) -> Tuple:
        """Run the common configuration sanity checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def _lowerCamelCase ( self: List[Any] ) -> int:
        # Intentionally a no-op placeholder.
        return
    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
    def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
        pass
    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
    def _lowerCamelCase ( self: Any ) -> Any:
        pass
    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
    def _lowerCamelCase ( self: str ) -> Optional[Any]:
        pass
    def _lowerCamelCase ( self: List[Any] ) -> int:
        """Check a training forward/backward pass runs for each model class."""
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            __UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_with_labels()
            __UpperCAmelCase : Optional[Any] = True
            if model_class.__name__ in [
                *get_values(__lowerCamelCase ),
                *get_values(__lowerCamelCase ),
            ]:
                continue
            __UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
            model.to(__lowerCamelCase )
            model.train()
            __UpperCAmelCase : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
            __UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
            loss.backward()
    def _lowerCamelCase ( self: Optional[int] ) -> Dict:
        """Same training check but with gradient checkpointing enabled."""
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
            __UpperCAmelCase : List[str] = False
            __UpperCAmelCase : int = True
            if (
                model_class.__name__
                in [*get_values(__lowerCamelCase ), *get_values(__lowerCamelCase )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            __UpperCAmelCase : int = model_class(__lowerCamelCase )
            model.to(__lowerCamelCase )
            model.gradient_checkpointing_enable()
            model.train()
            __UpperCAmelCase : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
            __UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
            loss.backward()
    def _lowerCamelCase ( self: List[str] ) -> Dict:
        """The forward signature must start with `pixel_values`."""
        __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCAmelCase : str = model_class(__lowerCamelCase )
            __UpperCAmelCase : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
            __UpperCAmelCase : int = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , __lowerCamelCase )
    def _lowerCamelCase ( self: str ) -> List[Any]:
        """Exercise the base model."""
        __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCamelCase )
    def _lowerCamelCase ( self: Union[str, Any] ) -> Dict:
        """Hidden states must have one entry per stage plus the stem."""
        def check_hidden_states_output(__lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str ):
            __UpperCAmelCase : Any = model_class(__lowerCamelCase )
            model.to(__lowerCamelCase )
            model.eval()
            with torch.no_grad():
                __UpperCAmelCase : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
            __UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            __UpperCAmelCase : Optional[int] = self.model_tester.num_stages
            self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCAmelCase : Optional[int] = True
            check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __UpperCAmelCase : Any = True
            check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
    def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
        """Exercise the image-classification head."""
        __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
    @slow
    def _lowerCamelCase ( self: Dict ) -> List[Any]:
        """Smoke-test loading a pretrained checkpoint."""
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCAmelCase : Optional[int] = ConvNextVaModel.from_pretrained(__lowerCamelCase )
            self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) -> List[Any]:
    """Load the COCO cats fixture image used by the integration tests.

    Fixes the mangled original, which bound the opened image to
    ``__UpperCAmelCase`` and then returned an undefined name ``image``
    (NameError on every call).
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
    """Slow integration test: run pretrained ConvNextV2 on the cats image.

    NOTE(review): machine-mangled — results are bound to ``__UpperCAmelCase``
    while later lines read the intended names (``model``, ``outputs`` …);
    kept byte-identical pending restoration.
    """
    @cached_property
    def _lowerCamelCase ( self: Optional[int] ) -> Dict:
        """Image processor for the pretrained checkpoint (None without vision deps)."""
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
    @slow
    def _lowerCamelCase ( self: List[Any] ) -> Tuple:
        """Check the logits of the pretrained checkpoint on a fixture image."""
        __UpperCAmelCase : List[Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCamelCase )
        __UpperCAmelCase : List[str] = self.default_image_processor
        __UpperCAmelCase : Optional[Any] = prepare_img()
        __UpperCAmelCase : int = preprocessor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
        # forward pass
        with torch.no_grad():
            __UpperCAmelCase : str = model(**__lowerCamelCase )
        # verify the logits
        __UpperCAmelCase : Dict = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , __lowerCamelCase )
        __UpperCAmelCase : str = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
| 342 | 0 |
"""simple docstring"""
def a_ ( input_string: str , pattern: str ) -> bool:
    """Return ``True`` when ``input_string`` fully matches ``pattern``.

    The pattern supports two metacharacters:
      ``.``  matches any single character;
      ``*``  matches zero or more repetitions of the preceding element.

    Bottom-up dynamic programming: ``dp[i][j]`` is 1 iff the first ``i``
    characters of the string match the first ``j`` characters of the
    pattern.

    NOTE(review): the mangled original declared both parameters with the
    same name (a SyntaxError) and read an unbound ``dp``; they are renamed
    here — existing calls are positional, so callers are unaffected.
    """
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for _ in range(len_pattern )] for _ in range(len_string )]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1 , len_string ):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1 , len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1 , len_string ):
        for j in range(1 , len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                # Literal (or '.') match: extend the diagonal.
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # '*' consumes zero occurrences of its element.
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # '*' consumes one more occurrence of its element.
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
    # using function to check whether given string matches the given pattern
    # (the mangled original called an undefined name `match_pattern`)
    if a_(input_string , pattern ):
        print(f'''{input_string} matches the given pattern {pattern}''' )
    else:
        print(f'''{input_string} does not match with the given pattern {pattern}''' )
| 77 | import math
import qiskit
def lowerCamelCase ( input_a=1 , input_b=1 , carry_in=1 ):
    """Build and simulate a two-qubit-output quantum full adder.

    Each input may be 0, 1 or 2: 0 leaves the corresponding qubit in |0>,
    1 flips it to |1>, and 2 puts it into superposition via a Hadamard
    gate.  The circuit computes sum and carry on qubits 2 and 3 and
    returns the measurement counts from 1 000 simulator shots.

    NOTE(review): the mangled original declared all three parameters with
    one name (a SyntaxError) and passed that name to every call; the
    reconstruction below follows the visible structure.  Positional calls
    are unaffected by the renames.

    Raises:
        TypeError:  if any input is a string (message kept verbatim).
        ValueError: if any input is negative, non-integral, or > 2.
    """
    # Reject non-numeric inputs up front (the error text documents the
    # intended contract: inputs are small non-negative integers).
    if (
        isinstance(input_a , str )
        or isinstance(input_b , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('''inputs must be integers.''' )
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''' )
    if (
        (math.floor(input_a ) != input_a )
        or (math.floor(input_b ) != input_b )
        or (math.floor(carry_in ) != carry_in )
    ):
        raise ValueError('''inputs must be exact integers.''' )
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''' )
    # build registers: 4 qubits (3 inputs + 1 ancilla), 2 classical bits
    quantum_r = qiskit.QuantumRegister(4 , '''qr''' )
    classical_r = qiskit.ClassicalRegister(2 , '''cr''' )
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(quantum_r , classical_r )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , classical_r )  # measure the last two qbits
    backend = qiskit.Aer.get_backend('''aer_simulator''' )
    job = qiskit.execute(quantum_circuit , backend , shots=1_000 )
    return job.result().get_counts(quantum_circuit )


if __name__ == "__main__":
    # The mangled original called an undefined name `quantum_full_adder`;
    # call the function defined above instead.
    print(F'Total sum count for state is: {lowerCamelCase(1, 1, 1)}')
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __snake_case :
    """Builds TimeSformer configurations and dummy video inputs for tests.

    NOTE(review): machine-mangled — ``__init__`` repeats the parameter name
    ``snake_case__`` (duplicate argument names are a SyntaxError) and the
    bodies bind to ``UpperCAmelCase`` while reading the intended attribute
    names; kept byte-identical pending restoration.
    """
    def __init__( self , snake_case__ , snake_case__=13 , snake_case__=10 , snake_case__=3 , snake_case__=2 , snake_case__=2 , snake_case__=True , snake_case__=True , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=10 , snake_case__=0.02 , snake_case__="divided_space_time" , snake_case__=None , ) -> List[str]:
        """Record the dummy-model hyper-parameters on the tester instance."""
        UpperCAmelCase : Optional[Any] =parent
        UpperCAmelCase : Union[str, Any] =batch_size
        UpperCAmelCase : Dict =image_size
        UpperCAmelCase : str =num_channels
        UpperCAmelCase : str =patch_size
        UpperCAmelCase : Any =num_frames
        UpperCAmelCase : Optional[Any] =is_training
        UpperCAmelCase : int =use_labels
        UpperCAmelCase : Tuple =hidden_size
        UpperCAmelCase : Dict =num_hidden_layers
        UpperCAmelCase : int =num_attention_heads
        UpperCAmelCase : Tuple =intermediate_size
        UpperCAmelCase : List[str] =hidden_act
        UpperCAmelCase : Any =hidden_dropout_prob
        UpperCAmelCase : str =attention_probs_dropout_prob
        UpperCAmelCase : Optional[Any] =attention_type
        UpperCAmelCase : str =initializer_range
        UpperCAmelCase : List[Any] =scope
        UpperCAmelCase : Union[str, Any] =num_labels
        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        UpperCAmelCase : Union[str, Any] =(image_size // patch_size) ** 2
        UpperCAmelCase : str =(num_frames) * self.num_patches_per_frame + 1
    def UpperCAmelCase__ ( self ) -> Dict:
        """Create random video pixel values (and labels) plus a config."""
        UpperCAmelCase : List[Any] =floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase : List[Any] =None
        if self.use_labels:
            UpperCAmelCase : Union[str, Any] =ids_tensor([self.batch_size] , self.num_labels )
        UpperCAmelCase : Dict =self.get_config()
        return config, pixel_values, labels
    def UpperCAmelCase__ ( self ) -> List[str]:
        """Build a TimesformerConfig from the stored hyper-parameters."""
        UpperCAmelCase : List[Any] =TimesformerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
        UpperCAmelCase : Optional[Any] =self.num_labels
        return config
    def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Any:
        """Run the base model and check the last hidden state shape."""
        UpperCAmelCase : str =TimesformerModel(config=snake_case__ )
        model.to(snake_case__ )
        model.eval()
        UpperCAmelCase : Union[str, Any] =model(snake_case__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ ) -> str:
        """Run the video-classification head and check the logits shape."""
        UpperCAmelCase : Optional[Any] =TimesformerForVideoClassification(snake_case__ )
        model.to(snake_case__ )
        model.eval()
        UpperCAmelCase : List[str] =model(snake_case__ )
        # verify the logits shape
        UpperCAmelCase : Any =torch.Size((self.batch_size, self.num_labels) )
        self.parent.assertEqual(result.logits.shape , snake_case__ )
    def UpperCAmelCase__ ( self ) -> Tuple:
        """Return (config, inputs_dict) for the shared test machinery."""
        UpperCAmelCase : Tuple =self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] =config_and_inputs
        UpperCAmelCase : List[Any] ={'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __snake_case ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Model/pipeline test suite for TimeSformer.

    NOTE(review): machine-mangled — the class lists ``lowerCamelCase__``
    twice as a base (``TypeError: duplicate base class``), some methods
    repeat the parameter name ``snake_case__`` (SyntaxError), and results
    are bound to ``UpperCAmelCase`` while later lines read the intended
    names; kept byte-identical pending restoration.
    """
    # Model classes / pipeline mapping exercised by the common tests.
    __lowerCamelCase : List[Any] = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    __lowerCamelCase : Dict = (
        {"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )
    # Feature flags consumed by the shared test machinery.
    __lowerCamelCase : List[str] = False
    __lowerCamelCase : Tuple = False
    __lowerCamelCase : int = False
    __lowerCamelCase : Any = False
    def UpperCAmelCase__ ( self ) -> Dict:
        """Instantiate the model tester and the config tester."""
        UpperCAmelCase : Optional[int] =TimesformerModelTester(self )
        UpperCAmelCase : Dict =ConfigTester(
            self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
    def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__=False ) -> Tuple:
        """Deep-copy the inputs dict, adding zero labels when requested."""
        UpperCAmelCase : Union[str, Any] =copy.deepcopy(snake_case__ )
        if return_labels:
            if model_class in get_values(snake_case__ ):
                UpperCAmelCase : Any =torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
        return inputs_dict
    def UpperCAmelCase__ ( self ) -> Union[str, Any]:
        """Run the common configuration checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
    def UpperCAmelCase__ ( self ) -> List[str]:
        pass
    def UpperCAmelCase__ ( self ) -> Any:
        """Input embeddings must be a module; output embeddings absent or linear."""
        UpperCAmelCase , UpperCAmelCase : int =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase : List[Any] =model_class(snake_case__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            UpperCAmelCase : int =model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
    def UpperCAmelCase__ ( self ) -> Dict:
        """The forward signature must start with `pixel_values`."""
        UpperCAmelCase , UpperCAmelCase : str =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase : int =model_class(snake_case__ )
            UpperCAmelCase : Union[str, Any] =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase : Dict =[*signature.parameters.keys()]
            UpperCAmelCase : Tuple =['''pixel_values''']
            self.assertListEqual(arg_names[:1] , snake_case__ )
    def UpperCAmelCase__ ( self ) -> Dict:
        """Exercise the base model."""
        UpperCAmelCase : List[str] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case__ )
    def UpperCAmelCase__ ( self ) -> int:
        """Exercise the video-classification head."""
        UpperCAmelCase : List[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*snake_case__ )
    @slow
    def UpperCAmelCase__ ( self ) -> Dict:
        """Smoke-test loading a pretrained checkpoint."""
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase : List[str] =TimesformerModel.from_pretrained(snake_case__ )
            self.assertIsNotNone(snake_case__ )
    def UpperCAmelCase__ ( self ) -> Optional[int]:
        """Attention outputs must have the expected count and per-head shape."""
        if not self.has_attentions:
            pass
        else:
            UpperCAmelCase , UpperCAmelCase : Dict =self.model_tester.prepare_config_and_inputs_for_common()
            UpperCAmelCase : Dict =True
            for model_class in self.all_model_classes:
                UpperCAmelCase : Dict =self.model_tester.seq_length
                UpperCAmelCase : Union[str, Any] =self.model_tester.num_frames
                UpperCAmelCase : List[str] =True
                UpperCAmelCase : Optional[int] =False
                UpperCAmelCase : Tuple =True
                UpperCAmelCase : List[str] =model_class(snake_case__ )
                model.to(snake_case__ )
                model.eval()
                with torch.no_grad():
                    UpperCAmelCase : Tuple =model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
                UpperCAmelCase : Optional[Any] =outputs.attentions
                self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                UpperCAmelCase : Tuple =True
                UpperCAmelCase : str =model_class(snake_case__ )
                model.to(snake_case__ )
                model.eval()
                with torch.no_grad():
                    UpperCAmelCase : Optional[Any] =model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
                UpperCAmelCase : Tuple =outputs.attentions
                self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
                UpperCAmelCase : List[str] =len(snake_case__ )
                # Check attention is always last and order is fine
                UpperCAmelCase : Dict =True
                UpperCAmelCase : str =True
                UpperCAmelCase : int =model_class(snake_case__ )
                model.to(snake_case__ )
                model.eval()
                with torch.no_grad():
                    UpperCAmelCase : Optional[Any] =model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
                self.assertEqual(out_len + 1 , len(snake_case__ ) )
                UpperCAmelCase : Optional[Any] =outputs.attentions
                self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
    def UpperCAmelCase__ ( self ) -> int:
        """Hidden states must have one entry per layer plus the embeddings."""
        def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
            UpperCAmelCase : List[str] =model_class(snake_case__ )
            model.to(snake_case__ )
            model.eval()
            with torch.no_grad():
                UpperCAmelCase : Tuple =model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
            UpperCAmelCase : Union[str, Any] =outputs.hidden_states
            UpperCAmelCase : Dict =self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(snake_case__ ) , snake_case__ )
            UpperCAmelCase : Optional[int] =self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
        UpperCAmelCase , UpperCAmelCase : str =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase : str =True
            check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCAmelCase : Any =True
            check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def prepare_video() -> List[Any]:
    """Download the spaghetti-eating test clip and return it as a list of frames.

    Name restored to match the call site in the integration test below; the
    scrambled original dropped the downloaded path into a throwaway variable
    and called `np.load` on an undefined name.
    """
    file_path = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
    video = np.load(file_path )
    return list(video )
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
    """Integration test: run the pretrained Timesformer video classifier on a real clip."""

    @cached_property
    def default_image_processor( self ):
        # Name restored: the test below reads `self.default_image_processor`.
        # Returns None when the vision dependencies are not installed.
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
            if is_vision_available()
            else None
        )

    @slow
    def UpperCAmelCase__ ( self ) -> int:
        """Forward 8 frames through the K400-finetuned checkpoint and check the logits."""
        model = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400' ).to(
            torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8] , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 400) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 78 | from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import structure: submodule name -> list of public names it provides.
# The scrambled original rebound this dict to a plain list in every `else`
# branch (destroying it) and then handed an undefined `_import_structure`
# to _LazyModule; restored to the standard transformers pattern.
__snake_case = {
    'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}

# Slow (sentencepiece) tokenizer is only exposed when sentencepiece is installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case['tokenization_llama'] = ['LlamaTokenizer']

# Fast tokenizer requires the `tokenizers` package.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case['tokenization_llama_fast'] = ['LlamaTokenizerFast']

# Modeling code requires torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case['modeling_llama'] = [
        'LlamaForCausalLM',
        'LlamaModel',
        'LlamaPreTrainedModel',
        'LlamaForSequenceClassification',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], __snake_case, module_spec=__spec__)
| 78 | 1 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__A : Union[str, Any] = ["text", "image", "audio"]
def create_inputs(input_types: List[str] ):
    """Build one dummy input per requested modality ("text", "image", "audio").

    Nested lists of modalities produce nested lists of inputs; an unknown
    modality raises ValueError. Name restored to match the call sites in this
    module (the recursion and the tool tests below); the scrambled original
    appended to an undefined `inputs` and iterated an undefined `input_types`.
    """
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append('Text input' )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((512, 512) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3000 ) )
        elif isinstance(input_type , list ):
            # recurse for grouped modalities
            inputs.append(create_inputs(input_type ) )
        else:
            raise ValueError(F"""Invalid type requested: {input_type}""" )
    return inputs
def output_types(outputs: List ):
    """Map each output object to its modality name ("text"/"image"/"audio").

    Name restored to match the call site in the tool tests below; the
    scrambled original appended to an undefined accumulator and iterated an
    undefined `outputs`.
    """
    types = []
    for output in outputs:
        if isinstance(output , (str, AgentText) ):
            types.append('text' )
        elif isinstance(output , (Image.Image, AgentImage) ):
            types.append('image' )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            types.append('audio' )
        else:
            raise ValueError(F"""Invalid output: {output}""" )
    return types
@is_tool_test
class A_ :
    """Mixin exercising a tool's declared interface; subclasses provide `self.tool`.

    Method names restored: the scrambled original defined all five tests under
    the single name `_lowercase` (each def shadowed the previous, so only one
    survived) and compared against an undefined `_A`.
    """

    def test_inputs_outputs( self ):
        # Declared input/output modalities must come from the authorized list.
        self.assertTrue(hasattr(self.tool , 'inputs' ) )
        self.assertTrue(hasattr(self.tool , 'outputs' ) )
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , list ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )

    def test_call( self ):
        # Calling the tool with dummy inputs must yield the declared modalities.
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs ) , self.tool.outputs )

    def test_common_attributes( self ):
        self.assertTrue(hasattr(self.tool , 'description' ) )
        self.assertTrue(hasattr(self.tool , 'default_checkpoint' ) )
        self.assertTrue(self.tool.description.startswith('This is a tool that' ) )

    def test_agent_types_outputs( self ):
        # Outputs must be instances of the mapped agent types.
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
        for output, output_type in zip(outputs , self.tool.outputs ):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output , agent_type ) )

    def test_agent_types_inputs( self ):
        # Wrapping raw inputs in agent types must also be accepted.
        inputs = create_inputs(self.tool.inputs )
        _inputs = []
        for _input, input_type in zip(inputs , self.tool.inputs ):
            if isinstance(input_type , list ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        outputs = self.tool(*_inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
| 273 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

# The scrambled original bound every constant below to the same name `__A`,
# so each assignment destroyed the previous one. Names restored to the ones
# the SqueezeBERT tokenizer class reads.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum model input lengths, per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

# Default init kwargs, per checkpoint.
PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class A_ (a_ ):
    """Fast (tokenizers-backed) tokenizer for SqueezeBERT.

    Class-attribute and method names restored to the `PreTrainedTokenizerFast`
    contract: the scrambled original bound every attribute to one name,
    repeated parameter names in `__init__` (a SyntaxError), and defined all
    three overrides under the single name `_lowercase`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self ,
        vocab_file=None ,
        tokenizer_file=None ,
        do_lower_case=True ,
        unk_token="[UNK]" ,
        sep_token="[SEP]" ,
        pad_token="[PAD]" ,
        cls_token="[CLS]" ,
        mask_token="[MASK]" ,
        tokenize_chinese_chars=True ,
        strip_accents=None ,
        **kwargs ,
    ):
        super().__init__(
            vocab_file ,
            tokenizer_file=tokenizer_file ,
            do_lower_case=do_lower_case ,
            unk_token=unk_token ,
            sep_token=sep_token ,
            pad_token=pad_token ,
            cls_token=cls_token ,
            mask_token=mask_token ,
            tokenize_chinese_chars=tokenize_chinese_chars ,
            strip_accents=strip_accents ,
            **kwargs ,
        )
        # Rebuild the backend normalizer when its serialized state disagrees
        # with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """[CLS] A [SEP] (and B [SEP] when a non-empty second segment is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        """0s over the first segment (incl. specials), 1s over the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix=None ):
        """Delegate to the backend model; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 273 | 1 |
import os
import string
import sys
# Flag OR-ed into arrow-key codes to distinguish them from plain characters.
# Names restored throughout: the scrambled original bound everything to
# obfuscated names while this block and the readers below referenced
# ARROW_KEY_FLAG / KEYMAP / WIN_CH_BUFFER / WIN_KEYMAP (NameError at import),
# and the "arrow_begin"/"arrow_end" entries had been lost.
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    # Buffer of pending translated keystrokes on Windows.
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

# Digit keys map to their own character codes.
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Read one raw keypress (Windows via msvcrt, POSIX via termios/tty).

    Name restored to match the call sites in the reader function below; the
    scrambled original dropped every intermediate value into a throwaway name
    and read undefined identifiers.
    """
    if os.name == "nt":
        import msvcrt

        encoding = 'mbcs'
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP['esc'] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            # Always restore the terminal settings, even if the read raises.
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch
def lowerCAmelCase__ ( ):
    """Read one logical key.

    Decodes ESC-[ arrow sequences into ARROW_KEY_FLAG-tagged codes, passes
    printable characters through, and returns KEYMAP["undefined"] otherwise.
    Local names restored (the scrambled version discarded every read into a
    throwaway name); the bogus `List[Any]` return annotation, which referenced
    an unimported name at def time, was removed.
    """
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                # Re-tag arrow keys so callers can distinguish them.
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 366 |
def lowerCAmelCase__ ( a__: int ) -> None:
    """Pretty-print Pascal's triangle with `a__` rows, centred with spaces.

    Fixed: the scrambled original discarded the generated triangle into a
    throwaway name and then read undefined `triangle` / `num_rows`.
    """
    triangle = generate_pascal_triangle(a__ )
    for row_idx in range(a__ ):
        # Print left spaces
        for _ in range(a__ - row_idx - 1 ):
            print(end=' ' )
        # Print row values
        for col_idx in range(row_idx + 1 ):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx] , end=' ' )
            else:
                print(triangle[row_idx][col_idx] , end='' )
        print()
def generate_pascal_triangle(a__: int ) -> list[list[int]]:
    """Create Pascal's triangle with `a__` rows (list of rows, row r has r+1 ints).

    Raises TypeError for a non-int input and ValueError for a negative one.
    Names restored: the scrambled source defined the two helpers under the
    same obfuscated name with duplicated parameters (a SyntaxError) while the
    call sites used the names below.
    """
    if not isinstance(a__ , int ):
        raise TypeError('The input value of \'num_rows\' should be \'int\'' )
    if a__ == 0:
        return []
    elif a__ < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0' )
    triangle: list[list[int]] = []
    for current_row_idx in range(a__ ):
        current_row = populate_current_row(triangle , current_row_idx )
        triangle.append(current_row )
    return triangle


def populate_current_row(triangle: list[list[int]] , current_row_idx: int ) -> list[int]:
    """Build row `current_row_idx` from the rows already in `triangle`."""
    current_row = [-1] * (current_row_idx + 1 )
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1 , current_row_idx ):
        calculate_current_element(
            triangle , current_row , current_row_idx , current_col_idx )
    return current_row


def calculate_current_element(
    triangle: list[list[int]] , current_row: list[int] , current_row_idx: int , current_col_idx: int , ) -> None:
    """Set one interior element as the sum of the two elements above it."""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(a__: int ) -> list[list[int]]:
    """Create Pascal's triangle exploiting row symmetry: compute only the
    first half of each row and mirror it.

    Fixed: the scrambled original called `isinstance(a__, a__)` (always a
    TypeError) and bound every local to one throwaway name while reading
    `temp_row` / `distinct_elements` / `row_first_half` (NameError).
    """
    if not isinstance(a__ , int ):
        raise TypeError('The input value of \'num_rows\' should be \'int\'' )
    if a__ == 0:
        return []
    elif a__ < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0' )
    result: list[list[int]] = [[1]]
    for row_index in range(1 , a__ ):
        # Pad the previous row with zeros so neighbour sums need no bounds checks.
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length , 2 ) )
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row )
    return result
def lowerCAmelCase__ ( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(a__: Callable , a__: int ) -> None:
_UpperCAmelCase = F'''{func.__name__}({value})'''
_UpperCAmelCase = timeit(F'''__main__.{call}''' , setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'''{call:38} -- {timing:.4f} seconds''' )
for value in range(1_5 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(a__ , a__ )
print()
if __name__ == "__main__":
    # Run the doctests in this module, then time both generators.
    import doctest

    doctest.testmod()
    # NOTE(review): relies on a module-level `benchmark`; in the scrambled
    # source every def was renamed, so confirm the binding exists.
    benchmark()
| 185 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class snake_case__ ( snake_case_ ):
    """Config tester verifying MobileViT-specific attributes exist on the config."""

    def create_and_test_config_common_properties( self ):
        # Fixed: the scrambled version built the config into a throwaway name
        # and called hasattr on an undefined identifier. Method name restored
        # to the hook the ConfigTester base invokes.
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , "hidden_sizes" ) )
        self.parent.assertTrue(hasattr(config , "neck_hidden_sizes" ) )
        self.parent.assertTrue(hasattr(config , "num_attention_heads" ) )
class snake_case__ :
    """Builds small MobileViT configs/inputs and checks model output shapes.

    Parameter and method names restored: the scrambled `__init__` repeated one
    parameter name for every argument (a SyntaxError) while the body read the
    names below, and the methods were all defined under `a__` although the
    test class calls them by the names used here.
    """

    def __init__(
        self ,
        parent ,
        batch_size=13 ,
        image_size=32 ,
        patch_size=2 ,
        num_channels=3 ,
        last_hidden_size=640 ,
        num_attention_heads=4 ,
        hidden_act="silu" ,
        conv_kernel_size=3 ,
        output_stride=32 ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        classifier_dropout_prob=0.1 ,
        initializer_range=0.02 ,
        is_training=True ,
        use_labels=True ,
        num_labels=10 ,
        scope=None ,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs( self ):
        """Random pixel values plus (optional) classification/segmentation labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config( self ):
        return MobileViTConfig(
            image_size=self.image_size ,
            patch_size=self.patch_size ,
            num_channels=self.num_channels ,
            num_attention_heads=self.num_attention_heads ,
            hidden_act=self.hidden_act ,
            conv_kernel_size=self.conv_kernel_size ,
            output_stride=self.output_stride ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            classifier_dropout_prob=self.classifier_dropout_prob ,
            initializer_range=self.initializer_range ,
        )

    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
        model = MobileViTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape ,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        # Check output shape both with and without labels.
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape ,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        result = model(pixel_values , labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape ,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ):
    """Common-test harness for MobileViT (base model, classification and
    segmentation heads).

    Attribute and method names restored: the scrambled original bound every
    class attribute to the single name `_snake_case` (losing the mixin
    configuration) and defined every test under `a__`, so only the last def
    survived.
    """

    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp( self ):
        self.model_tester = MobileViTModelTester(self )
        self.config_tester = MobileViTConfigTester(self , config_class=MobileViTConfig , has_text_modality=False )

    def test_config( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds" )
    def test_inputs_embeds( self ):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings" )
    def test_model_common_attributes( self ):
        pass

    @unittest.skip(reason="MobileViT does not output attentions" )
    def test_attention_outputs( self ):
        pass

    def test_forward_signature( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def test_model_is_small( self ):
        # NOTE(review): the original method name is not recoverable from the
        # scrambled source; confirm against upstream.
        pass

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states ) , expected_num_stages )
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) ,
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    def test_for_semantic_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """Load the standard COCO test image used by the integration tests below.

    Name restored to match the call sites; the scrambled version bound the
    opened image to a throwaway name and returned an undefined identifier.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
    """Slow integration tests for pretrained MobileViT checkpoints.

    Method names restored: the scrambled original defined every method under
    `a__` (so only the last survived) while the bodies read
    `self.default_image_processor`.
    """

    @cached_property
    def default_image_processor( self ):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small" ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head( self ):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )

    @slow
    def test_inference_semantic_segmentation( self ):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ] ,
            device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )

    @slow
    def test_post_processing_semantic_segmentation( self ):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        outputs.logits = outputs.logits.detach().cpu()
        # With an explicit target size...
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(50, 60)] )
        expected_shape = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , expected_shape )
        # ...and with the model's native resolution.
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , expected_shape )
| 261 | """simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class snake_case__ ( snake_case_, snake_case_ ):
    """Learnable per-dimension mean/std used to (un)normalize image embeddings
    (StableUnCLIP-style image normalizer).

    Restored: the scrambled `__init__` dropped both parameters into a
    throwaway local (leaving the module stateless), the `to` method declared
    two parameters under one name (a SyntaxError), and all three methods
    shared the name `a__` so two of them were lost.
    """

    @register_to_config
    def __init__( self , lowerCamelCase = 768 , ):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , lowerCamelCase ) )
        self.std = nn.Parameter(torch.ones(1 , lowerCamelCase ) )

    def to( self , torch_device = None , torch_dtype = None , ):
        """Move/cast the statistics; returns self so calls can be chained."""
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self

    def scale( self , embeds ):
        """Normalize embeddings: (embeds - mean) / std."""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale( self , embeds ):
        """Invert `scale`: embeds * std + mean."""
        embeds = (embeds * self.std) + self.mean
        return embeds
| 261 | 1 |
'''simple docstring'''
from __future__ import annotations
def prime_sieve(__lowerCamelCase : int ) -> list[int]:
    """Return all primes strictly below `__lowerCamelCase` using an odd-only sieve.

    Fixed: the scrambled body read undefined `limit` / `is_prime` / `index` /
    `primes` names and appended the limit itself instead of the prime `i`.
    A guard for limits <= 2 was added (the original indexed `is_prime[2]`
    unconditionally).
    """
    if __lowerCamelCase <= 2:
        return []
    is_prime = [True] * __lowerCamelCase
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(__lowerCamelCase**0.5 + 1 ) , 2 ):
        # mark multiples of the odd candidate i (even slots are never read)
        index = i * 2
        while index < __lowerCamelCase:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , __lowerCamelCase , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution(__lowerCamelCase : int = 1_0_0_0_0_0_0 ) -> int:
    """Project Euler 50: the prime below the ceiling expressible as the sum of
    the most consecutive primes.

    Fixed: the scrambled body read undefined `ceiling` / `length` / `largest`
    and called an undefined sieve; a private sieve keeps this self-contained,
    and a set gives O(1) primality membership instead of list scans.
    """
    primes = _prime_sieve(__lowerCamelCase )
    prime_set = set(primes )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= __lowerCamelCase:
                break
            if sol in prime_set:
                length = j - i
                largest = sol
    return largest


def _prime_sieve(limit: int ) -> list[int]:
    """Odd-only sieve of primes strictly below `limit` (private helper)."""
    if limit <= 2:
        return []
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
if __name__ == "__main__":
    # NOTE(review): relies on a module-level `solution`; in the scrambled
    # source every def was renamed, so confirm the binding exists.
    print(F"""{solution() = }""")
| 242 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
lowercase =logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase ):
    """Deprecated alias of `DeformableDetrImageProcessor`, kept for backward
    compatibility; emits a FutureWarning on construction."""

    def __init__( self , *args , **kwargs ) -> None:
        # Restored distinct *args/**kwargs (the scrambled signature reused one
        # name for both — a SyntaxError) and the FutureWarning category that
        # had been replaced with an undefined identifier.
        warnings.warn(
            'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use DeformableDetrImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 242 | 1 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def get_flax_param(tax_checkpoint_path ):
    """Load a T5x checkpoint and return its parameters as a flat dict.

    Fixed: the scrambled version flattened the input *path* instead of the
    loaded parameter tree. Name restored to match the converter below.
    """
    flax_params = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    flax_params = flatten_dict(flax_params )
    return flax_params
def rename_and_convert_flax_params(flax_dict ):
    """Rename flattened T5x parameter keys (tuples of path parts) to HF
    Pix2Struct state-dict names and convert values to torch tensors
    (transposed, except embedding tables).

    Restored: the scrambled version bound the mapping dicts and the
    accumulator to throwaway names while reading `CONVERSION_MAPPING` /
    `converted_dict` / `new_key` (all undefined). Name restored to match the
    call site in the converter below.
    """
    converted_dict = {}
    CONVERSION_MAPPING = {
        'token_embedder': 'embeddings',
        'encoder_norm': 'layernorm',
        'kernel': 'weight',
        '.out': '.output',
        'scale': 'weight',
        'embedders_0.pos_embedding': 'row_embedder.weight',
        'embedders_1.pos_embedding': 'column_embedder.weight',
    }
    DECODER_CONVERSION_MAPPING = {
        'query': 'attention.query',
        'key': 'attention.key',
        'value': 'attention.value',
        'output.dense': 'output',
        'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
        'pre_self_attention_layer_norm': 'self_attention.layer_norm',
        'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
        'mlp.': 'mlp.DenseReluDense.',
        'pre_mlp_layer_norm': 'mlp.layer_norm',
        'self_attention.o': 'self_attention.attention.o',
        'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
        'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
        'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.logits_dense.weight': 'decoder.lm_head.weight',
    }
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = '.'.join(key[1:] )
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old , new )
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old , new )
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)' , r'layer.\1' , new_key )
                new_key = new_key.replace('encoder' , 'encoder.encoder' )
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)' , r'layer.\1' , new_key )
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T )
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key] )
    return converted_torch_dict
def convert_pixastruct_original_pytorch_checkpoint_to_hf(t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False):
    """Convert a T5x Pix2Struct checkpoint into a HF model + processor and save both.

    Args:
        t5x_checkpoint_path: path to the original T5x/flax checkpoint.
        pytorch_dump_folder_path: output directory (created if missing).
        use_large: build the "large" configuration instead of the base one.
        is_vqa: whether the checkpoint is the VQA variant (forwarded to the config).

    NOTE(review): local names were mangled in the original; they are restored here so
    every reference (`encoder_config`, `decoder_config`, ...) resolves. The function
    name matches the call site in the `__main__` block below.
    """
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = PixaStructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = PixaStructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        # large checkpoints were trained with more image patches
        # NOTE(review): attribute targets reconstructed from upstream convert script — confirm
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint/output paths and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--use_large', action='store_true', help='Use large model.')
    parser.add_argument('--is_vqa', action='store_true', help='Whether the checkpoint is the VQA variant.')
    args = parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        # bug fix: was `args.tax_checkpoint_path`, an attribute that argparse never creates
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 216 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    """Builds small LED configs and inputs for the TF LED tests below.

    NOTE(review): the original identifiers in this block were mangled (class name,
    `__init__` used duplicate parameter names — a SyntaxError — and every attribute
    assignment was discarded). They are restored so `self.*` references and the
    callers in the test class (`prepare_config_and_inputs_for_common`,
    `check_decoder_model_past_large_inputs`, `TFLEDModelTester(self)`) resolve.
    """

    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        """Return a small LED config plus a matching inputs dict (with global attention mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        # mark the last (eos) token of every sequence as globally attended
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Verify cached decoding (past_key_values) matches a full forward pass on a slice."""
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    """Build the default LED inputs dict, deriving any mask the caller did not supply.

    Attention masks are 1 for non-pad tokens; the first decoder position is always
    attended. Head masks default to all-ones. The function name matches the call
    sites elsewhere in this file (`prepare_led_inputs_dict(...)`).
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline tests for the TF LED models.

    NOTE(review): base classes and attribute/method names were mangled in the
    original (`__lowercase` bases, throwaway `__a` assignments); they are restored
    from the mixins imported at the top of the file.
    """

    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # make the first `num_global_attn_indices` positions global, everything else local
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False  # NOTE(review): middle flag reconstructed — confirm
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.')
    def test_saved_model_creation(self):
        # NOTE(review): skipped-test name reconstructed from the skip reason — confirm
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implement
        pass
def _long_tensor(tok_lst):
    """Wrap a nested list of token ids in an int32 TF constant (name matches callers below)."""
    return tf.constant(tok_lst, dtype=tf.int32)
lowercase__ =1e-4  # float tolerance for approximate comparisons; appears unused in this chunk
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the pretrained `allenai/led-base-16384` checkpoint."""

    def test_inference_no_head(self):
        # bare encoder-decoder without the LM head
        model = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384').led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        # full model with LM head: output dimension is the vocabulary size
        model = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384')

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 216 | 1 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    """Split a scikit-learn "bunch" mapping into its feature matrix and target vector.

    The original parameter was named `lowercase` while the body read `data`,
    which raised NameError; the name is restored (and matches the caller in main).
    """
    return (data["data"], data["target"])
def xgboost(features, target) -> XGBClassifier:
    """Fit and return an `XGBClassifier` on the given feature matrix / target vector.

    The original signature had two parameters with the same name (a SyntaxError)
    and the body referenced an undefined `_UpperCamelCase`; both are fixed.
    """
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def main() -> None:
    """Train an XGBoost classifier on the Iris dataset and show its confusion matrix."""
    # Load Iris dataset (local names restored; `iris` was referenced but never bound)
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
    # Run the module's doctests before executing the demo pipeline.
    import doctest
    doctest.testmod(verbose=True)
    main()
| 359 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict) -> bool:
    """Return True if `graph` (adjacency-list dict keyed 0..n-1) is 2-colorable.

    Colors vertices via DFS with alternating colors, then verifies no edge joins
    two vertices of the same color. The original inner `dfs` declared two
    parameters with the same name (a SyntaxError); names are restored, and the
    return annotation is corrected to `bool`.
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        # color vertex v with c and recurse into unvisited neighbors with 1 - c
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # graph may be disconnected: start a DFS from every unvisited vertex
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph (name restored: `print` below reads `graph`, not `_a`)
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 46 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Both statements were assigned to the same throwaway name `A` (the map clobbered
# the logger); conventional module-level names are restored.
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class Data2VecTextConfig(PretrainedConfig):
    """Configuration for a data2vec-text model (RoBERTa-style hyper-parameters).

    The base class (`PretrainedConfig`, imported above) and the `model_type`
    attribute name were mangled in the original and are restored here.
    """

    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    """ONNX export configuration: declares the dynamic axes of the model inputs."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # multiple-choice inputs carry an extra `choice` axis between batch and sequence
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
| 34 |
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Return the first non-negative integer found among env vars `env_keys`, else `default`.

    Unset variables read as -1 and are skipped. (The original declared two
    parameters with the same name — a SyntaxError — names restored.)
    """
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default
def parse_flag_from_env(key, default=False):
    """Interpret env var `key` as a boolean flag, falling back to `default` when unset."""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env(key, default="no"):
    """Return the raw string value of env var `key`, or `default` when unset."""
    value = os.environ.get(key, str(default))
    return value
| 34 | 1 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    """A second lock on the same file must raise `Timeout` while the first is held."""
    locka = FileLock(str(tmpdir / 'foo.lock'))
    lockb = FileLock(str(tmpdir / 'foo.lock'))
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout):  # was `pytest.raises(lowercase_)` — the tmpdir param
            _start = time.time()
            lockb.acquire(timeout)
        assert time.time() - _start > timeout
def test_long_path(tmpdir):
    """Over-long lock filenames are shortened so the basename stays within 255 chars."""
    filename = 'a' * 1000 + '.lock'
    locka = FileLock(str(tmpdir / filename))
    assert locka._lock_file.endswith('.lock')
    assert not locka._lock_file.endswith(filename)
    assert len(os.path.basename(locka._lock_file)) <= 255
    lockb = FileLock(tmpdir / filename)
    with locka.acquire():
        with pytest.raises(Timeout):
            lockb.acquire(0)
| 360 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import machinery for the RemBERT subpackage: `_import_structure` maps
# submodule names to their exported symbols, extended only when the optional
# backend (sentencepiece / tokenizers / torch / tf) is available. The original
# repeatedly reassigned one throwaway name instead of keying into the dict and
# discarded the `_LazyModule` instead of installing it in `sys.modules`.
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # For type checkers, perform the real imports so symbols resolve statically.
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 181 | 0 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : list[int] ):
    """Return the maximum product over all contiguous subarrays of the given integers.

    Classic max/min running-product scan: because a negative factor flips sign,
    both the running maximum and minimum products are tracked and swapped when a
    negative number is met. Returns 0 for an empty input; raises ValueError for
    non-integer contents. (Local names were mangled in the original — every
    value was written to one throwaway name while `min_till_now`/`max_till_now`
    were read but never bound.)
    """
    if not numbers_valid(SCREAMING_SNAKE_CASE):
        return 0
    max_till_now = min_till_now = max_prod = SCREAMING_SNAKE_CASE[0]
    for i in range(1, len(SCREAMING_SNAKE_CASE)):
        # update the maximum and minimum subarray products
        number = SCREAMING_SNAKE_CASE[i]
        if number < 0:
            # a negative factor swaps the roles of the running max and min
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod


def numbers_valid(numbers) -> bool:
    """Validate input for `a__`: False for empty input, ValueError for non-int items."""
    if not numbers:
        return False
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    return True
| 108 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Both statements were assigned to the same throwaway name `snake_case_` (the
# archive map clobbered the logger); conventional module-level names restored.
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class LiltConfig(PretrainedConfig):
    """Configuration for a LiLT model (layout-aware language model).

    Base class (`PretrainedConfig`, imported above) and attribute names were
    mangled in the original; `max_ad_position_embeddings` is restored to
    `max_2d_position_embeddings` (the 2-D layout embedding table size).
    """

    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=10_24,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 125 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)  # name restored: `logger.info` is used below
def replace_key_with_offset(key, offset, original_name, new_name):
    """Rewrite `<block>.<layer>.<original_name>` in `key` to `block.<block-offset>.<layer>.<new_name>`.

    The block/layer numbers are the two dotted components immediately preceding
    the first component of `original_name` in `key`; `offset` accounts for
    patch-embedding layers removed earlier in the conversion.
    """
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f'{orig_block_num}.{layer_num}.{original_name}', f'block.{new_block_num}.{layer_num}.{new_name}')
    return key
def rename_keys(state_dict):
    """Map original PoolFormer checkpoint keys to HuggingFace naming and return a new OrderedDict.

    The original assigned every rewritten key to a throwaway name instead of
    rebinding `key`, so none of the renames took effect; rebinding is restored.
    """
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f'patch_embeddings.{total_embed_found}.')
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Download the standard COCO cats image used to sanity-check vision model conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # `stream=True` (was an undefined mangled name) so the raw response can be read by PIL
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Convert an original PoolFormer checkpoint to a HF model, verify logits, and save it.

    Args:
        model_name: e.g. "poolformer_s12"; the trailing 3 chars select the size config.
        checkpoint_path: path to the original .pth state dict.
        pytorch_dump_folder_path: output folder for model + image processor.

    NOTE(review): every local/config assignment in the original was discarded into
    one throwaway name; targets are reconstructed from the upstream convert script.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f'Size {size} not supported')

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f'Converting model {model_name}...')

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f'Size {size} not supported')

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point (names restored: `parser`/`args` were assigned to throwaway names)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        default='poolformer_s12',
        type=str,
        help='Name of the model you\'d like to convert.',
    )
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)  # conventional module logger name restored
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Convert an original RoBERTa-PreLayerNorm Hub checkpoint to the HF layout.

    Downloads ``pytorch_model.bin`` from ``checkpoint_repo``, renames the
    ``roberta.*`` weight keys to ``roberta_prelayernorm.*``, drops unused
    LayerNorm weights, and saves the converted model and tokenizer to
    ``pytorch_dump_folder_path``.

    Fixes vs. the previous version: the two parameters were both named
    ``__UpperCamelCase`` (duplicate argument -> SyntaxError) and every
    intermediate result was bound to ``A_`` while later lines read
    ``config``/``original_state_dict``/``new_state_dict`` (NameError); the
    function name now matches the call site under ``__main__``.
    """
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    new_state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue
        new_state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=new_state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__a :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__a :Any = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 329 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the BLIP-2 sub-package: maps each submodule to the
# public names it exposes so the heavy imports only happen on first access.
# Fixes: the dict was bound to a throwaway name while `_LazyModule` reads
# `_import_structure`; the torch-only class list was assigned to another
# throwaway name instead of `_import_structure["modeling_blip_2"]`; the
# TYPE_CHECKING imports used mangled `blip_a`/`Blipa*` names; and the
# `_LazyModule` instance was discarded instead of installed in `sys.modules`.
_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes require torch; only register them when it is installed.
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the Chinese-CLIP sub-package.
# Fixes: the dict and the optional-dependency class lists were bound to the
# throwaway name `lowercase` while `_LazyModule` reads `_import_structure`,
# and the `_LazyModule` instance was discarded instead of being installed
# into `sys.modules[__name__]`.
_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Vision-only helpers; registered only when vision deps are installed.
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42 | 1 |
'''simple docstring'''
def dodecahedron_surface_area(edge: int) -> float:
    """Return the surface area of a regular dodecahedron with integer edge length.

    Uses 3 * sqrt(25 + 10*sqrt(5)) * edge^2.

    Fixes: both functions here were named ``UpperCamelCase__`` (the second
    shadowed the first) and the parameter was named ``lowerCAmelCase`` while
    the body read ``edge`` and called ``isinstance`` on the undefined ``__a``.

    :raises ValueError: if ``edge`` is not a positive integer.
    """
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("""Length must be a positive.""")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: int) -> float:
    """Return the volume of a regular dodecahedron with integer edge length.

    Uses ((15 + 7*sqrt(5)) / 4) * edge^3.

    :raises ValueError: if ``edge`` is not a positive integer.
    """
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("""Length must be a positive.""")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    """Builds a tiny MaskFormerSwin config plus random inputs for the tests below.

    Fixes: the class was named ``UpperCAmelCase`` although the test classes
    instantiate ``MaskFormerSwinModelTester``, and all five helper methods were
    named ``lowercase__`` (each shadowing the previous one) while callers use
    the conventional tester method names restored here.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with freshly sampled tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        # NOTE(review): `path_norm` (sic) is kept from the original call; it is
        # forwarded through Config kwargs — confirm against MaskFormerSwinConfig.
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # Sequence length shrinks by 4x per merge stage; width doubles per stage.
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError for an unsupported stage name
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for MaskFormerSwin.

    Fixes: the class and its mixin bases were obfuscated (``UpperCAmelCase``,
    ``snake_case_``) and every test method was named ``lowercase__`` — later
    definitions shadowed earlier ones and unittest discovered no tests at all.
    Names are restored to the conventional mixin/test method names; test-method
    names carry lower confidence where no in-file call site grounds them.
    """

    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
            """ `nn.DataParallel`"""
        ) )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Common property checks are not applicable to this backbone config.
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("""Swin does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("""Swin does not support feedforward chunking""")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, """expected_num_hidden_layers""", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # Use a patch size that does not divide the image size, forcing padding.
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""")
    def test_save_load_fast_init_to_base(self):
        pass

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            # Fix: the in-place NaN mask assignment was lost to an obfuscated name.
            t[t != t] = 0
            return t

        # Mutable default kept deliberately: it is read-only inside the helper.
        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5), msg=(
                                """Tuple and dict output are not equal. Difference:"""
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ), )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"""output_hidden_states""": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"""output_hidden_states""": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    """Backbone-specific tests for MaskFormerSwin.

    Fixes: the class was named ``UpperCAmelCase`` (colliding with the other
    test classes), the mixin base was the placeholder ``snake_case_``, the
    attributes were named ``_lowercase``, and the test method was named
    ``lowercase__`` so it was never collected.
    """

    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Overridden: MaskFormerSwin hidden states come back as
    # (batch_size, height * width, n_channels) rather than NCHW.
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["""pixel_values"""].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 220 | 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    """Assert `dataset` has the fixture's 4x3 shape and the expected column dtypes.

    Fixes: both parameters were named ``lowercase`` (duplicate argument ->
    SyntaxError) and the def name did not match the ``_check_parquet_dataset``
    call sites below.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase__( lowercase : str , lowercase : int , lowercase : str ) -> Optional[int]:
__snake_case : Optional[int] = tmp_path / "cache"
__snake_case : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__snake_case : Union[str, Any] = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize(
    "features", [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ], )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    """An explicit `features` mapping overrides the dtypes inferred from parquet."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase__( lowercase : Union[str, Any] , lowercase : int , lowercase : Tuple ) -> Tuple:
__snake_case : str = tmp_path / "cache"
__snake_case : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__snake_case : Tuple = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCAmelCase__( lowercase : int , lowercase : Optional[int] , lowercase : Dict ) -> str:
if issubclass(lowercase , lowercase ):
__snake_case : Optional[Any] = parquet_path
elif issubclass(lowercase , lowercase ):
__snake_case : str = [parquet_path]
__snake_case : Tuple = tmp_path / "cache"
__snake_case : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__snake_case : Union[str, Any] = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Assert each named split of `dataset_dict` matches the fixture shape/dtypes.

    Fixes: three parameters all named ``lowercase`` (SyntaxError) and a def
    name that did not match the ``_check_parquet_datasetdict`` call sites.
    """
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase__( lowercase : str , lowercase : Optional[int] , lowercase : Union[str, Any] ) -> Dict:
__snake_case : List[str] = tmp_path / "cache"
__snake_case : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__snake_case : Any = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize(
    "features", [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ], )
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    """DatasetDict variant of the explicit-features override check."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase__( lowercase : Union[str, Any] , lowercase : str , lowercase : Optional[Any] ) -> Optional[Any]:
if split:
__snake_case : int = {split: parquet_path}
else:
__snake_case : Dict = "train"
__snake_case : Optional[Any] = {"train": parquet_path, "test": parquet_path}
__snake_case : Tuple = tmp_path / "cache"
__snake_case : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__snake_case : Dict = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write(dataset, tmp_path):
    """Writing a dataset to parquet round-trips its Arrow table exactly.

    Fixes: duplicate ``lowercase`` parameters (SyntaxError) and an
    uncollectable def name; fixture names restored.
    """
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    """Image features survive a parquet round-trip (eager and streaming reads)."""
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected", [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ], )
def test_get_writer_batch_size(feature, expected):
    """Media features select a smaller parquet row-group size; plain ones don't."""
    assert get_writer_batch_size(feature) == expected
| 326 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Fix: both constants were bound to `_UpperCamelCase`, so the second assignment
# clobbered the first; restore the conventional `logger`/`device` names.
device = torch.device("cpu")
def prepare_img():
    """Download the standard COCO cats image used to sanity-check conversions.

    Fixes: the def was named ``lowerCAmelCase__`` although the converter calls
    ``prepare_img()``, and ``stream`` was set to the undefined name
    ``lowercase`` instead of ``True``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    """Return the expected first-five logits for each released SwiftFormer size.

    Returns None for unknown names (as before). Fixes: the def was named
    ``lowerCAmelCase__`` (shadowed by later defs) and the parameter name was
    junk; restored to the conversion-script convention.
    """
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])

    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])

    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])

    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    """Move the value stored under key `old` to key `new`, mutating `dct` in place.

    Fixes: the def name did not match the ``rename_key`` call in the converter,
    the three parameters were all named ``lowercase`` (SyntaxError), and the
    popped value was bound to a junk name instead of being re-inserted.
    """
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    """Map original SwiftFormer checkpoint keys to the HF model's key names.

    Returns a list of (old_key, new_key) pairs. Fixes: the def was named
    ``lowerCAmelCase__`` although the converter calls ``create_rename_keys``,
    and the parameter/locals were junk names.
    """
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                # network.<stage>.<block>.<rest> -> ...network.<stage>.blocks.<block>.<rest>
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def lowerCAmelCase__(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Convert an original SwiftFormer checkpoint to a HuggingFace model.

    Args:
        swiftformer_name: variant name, selects depths/embedding dims.
        pytorch_dump_folder_path: output directory for the converted model.
        original_ckpt: URL or local path of the original state dict; if falsy,
            no checkpoint is loaded (and conversion will fail — preserved from
            the original control flow).

    NOTE(review): the helpers called below (``prepare_img``,
    ``create_rename_keys``, ``rename_key``, ``get_expected_output``) are the
    sibling functions defined above in this module under obfuscated names —
    confirm the bindings before running.
    """
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    # The original converted `int(lowercase)` instead of the loop key.
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits
    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point for the conversion script. The original bound the parser
    # and parsed args to a throwaway name, leaving `parser`/`args` undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--swiftformer_name''',
        default='''swiftformer_xs''',
        choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
        type=str,
        help='''Name of the SwiftFormer model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''./converted_outputs/''',
        type=str,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
    args = parser.parse_args()
    # NOTE(review): `lowerCAmelCase__` resolves to the last definition above,
    # which is the conversion entry point.
    lowerCAmelCase__(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 326 | 1 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
__UpperCAmelCase : Any = getLogger(__name__)
def a(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fpaa=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
):
    """Run distributed generation over one shard of a seq2seq dataset and save
    the per-rank predictions to ``rank_<local_rank>_output.json``.

    Returns:
        (results, num_replicas): the list of ``{"pred", "id"}`` records written
        by this rank, and the number of participating replicas.

    NOTE(review): reconstructed from an obfuscated original whose signature
    repeated one parameter name; the parameter order and keyword names follow
    the call site in the CLI driver below — confirm against upstream.
    """
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend='''nccl''', rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"""rank_{local_rank}_output.json""")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).cuda()
    if fpaa:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop('''num_beams''', model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, '''prefix''', '''''') or ''''''
    dataset_kwargs = dataset_kwargs or {}  # guard against the None default
    ds = SeqaSeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch['''input_ids'''].to(model.device),
            attention_mask=batch['''attention_mask'''].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch['''ids''']
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({'''pred''': pred, '''id''': ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def a():
    """CLI driver: run distributed generation, then (on rank 0) gather each
    rank's output, score it with BLEU/ROUGE, and write metrics + generations.

    NOTE(review): reconstructed from an obfuscated original; sibling helpers
    are invoked by their conventional names (``eval_data_dir``,
    ``gather_results_from_each_node``, ``combine_partial_results``) — confirm
    those bindings exist in this module.
    """
    parser = argparse.ArgumentParser(
        epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate''')
    parser.add_argument('''--data_dir''', type=str, help='''like cnn_dm/test.source''')
    parser.add_argument(
        '''--model_name''', type=str, help='''like facebook/bart-large-cnn,t5-base, etc.''', default='''sshleifer/distilbart-xsum-12-3''', )
    parser.add_argument('''--save_dir''', type=str, help='''where to save''', default='''tmp_gen''')
    parser.add_argument('''--max_source_length''', type=int, default=None)
    parser.add_argument(
        '''--type_path''', type=str, default='''test''', help='''which subset to evaluate typically train/val/test''')
    parser.add_argument('''--task''', type=str, default='''summarization''', help='''used for task_specific_params + metrics''')
    parser.add_argument('''--bs''', type=int, default=8, required=False, help='''batch size''')
    parser.add_argument(
        '''--local_rank''', type=int, default=-1, required=False, help='''should be passed by distributed.launch''')
    parser.add_argument(
        '''--n_obs''', type=int, default=None, required=False, help='''How many observations. Defaults to all.''')
    parser.add_argument(
        '''--num_return_sequences''', type=int, default=1, required=False, help='''How many sequences to return''')
    parser.add_argument(
        '''--sync_timeout''', type=int, default=600, required=False, help='''How long should master process wait for other processes to finish.''', )
    parser.add_argument('''--src_lang''', type=str, default=None, required=False)
    parser.add_argument('''--tgt_lang''', type=str, default=None, required=False)
    parser.add_argument(
        '''--prefix''', type=str, required=False, default=None, help='''will be added to the begininng of src examples''')
    parser.add_argument('''--fp16''', action='''store_true''')
    parser.add_argument('''--debug''', action='''store_true''')
    start_time = time.time()
    # parse_known_args lets unspecified --key=value flags flow into generate().
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"""parsed the following generate kwargs: {generate_kwargs}""")
    json_save_dir = Path(args.save_dir + '''_tmp''')
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob('''rank_*.json'''))
    if intermediate_files:
        raise ValueError(f"""Found files at {json_save_dir} please move or remove them.""")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fpaa=args.fp16,  # argparse stores --fp16 under `args.fp16`, not `args.fpaa`
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath('''pseudolabel_results.json''')
            print(f"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + '''.target''')
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = '''translation''' in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = '''bleu''' if calc_bleu else '''rouge'''
        metrics = score_fn(preds, labels)
        metrics['''n_obs'''] = len(preds)
        runtime = time.time() - start_time
        metrics['''seconds_per_sample'''] = round(runtime / metrics['''n_obs'''], 4)
        metrics['''n_gpus'''] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"""{args.type_path}_{metric_name}.json""")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"""{args.type_path}_generations.txt"""))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"""{args.type_path}.target"""))
        else:
            shutil.rmtree(json_save_dir)
def a(partial_results):
    """Flatten per-rank record lists and return predictions sorted by id.

    Args:
        partial_results: iterable of lists of ``{"pred": ..., "id": ...}``
            records, one list per rank.

    Returns:
        The predictions in ascending id order (i.e. original dataset order).
    """
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x['''pred'''] for x in records]
    return preds
def a(num_replicas, save_dir, timeout):
    """Wait (up to ``timeout`` seconds) until every rank has written its
    ``rank_*.json`` file into ``save_dir``, then load and return them.

    Raises:
        TimeoutError: if the files do not all appear (fully written) in time.
    """
    start_wait = time.time()
    logger.info('''waiting for all nodes to finish''')
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob('''rank_*.json'''))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            # A rank is mid-write; retry until its file parses cleanly.
            continue
    else:
        raise TimeoutError('''Rank 0 gave up on waiting for other processes''')
# Unreachable
if __name__ == "__main__":
    # Usage for MT:
    # NOTE(review): `run_generate` is not bound anywhere in this module as
    # written (the CLI driver above is obfuscated to `a`) — confirm the
    # intended entry point before running this script directly.
    run_generate()
| 358 |
import math
def a(number):
    """Return True iff ``number`` is prime, by trial division over odd candidates.

    Args:
        number: a non-negative int (enforced by assertion).
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    # Only odd divisors up to sqrt(number) need to be tested.
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)
def a(value, factor=1, **kwargs):
    """Return the first prime at or after ``factor * value`` (strictly after it
    when the start value is itself prime), scanning upward — or downward when
    called with ``desc=True``.

    NOTE(review): `is_prime` is expected to be the primality helper defined just
    above, which the obfuscation also renamed to `a`; confirm the binding.
    """
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        # The start was already prime: recurse from the next integer. The
        # original recursed via the unbound name `next_prime`; this function's
        # actual name is used instead.
        return a(value + 1, **kwargs)
    return value
| 315 | 0 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
snake_case_ = [
    """good first issue""",
    """feature request""",
    """wip""",
]
# The stale-issue sweep below reads this conventional name, which was never
# bound after obfuscation; alias it to the same list object.
LABELS_TO_EXEMPT = snake_case_
def _lowerCAmelCase( ):
    """Close or nag stale issues on the huggingface/accelerate repository.

    An issue is closed when its last comment came from the bot and it has seen
    no activity for over a week (and is at least 30 days old); otherwise a
    stale warning comment is posted after ~3 weeks of inactivity. Issues with
    an exempt label are skipped. Requires a ``GITHUB_TOKEN`` env variable.
    """
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/accelerate')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        # Newest comment first (the original lambda referenced an unbound name).
        comments = sorted([comment for comment in issue.get_comments()], key=lambda c: c.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state='closed')
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.')
if __name__ == "__main__":
    # Run the stale-issue sweep defined above (obfuscated to `_lowerCAmelCase`;
    # the original called an unbound `main`).
    _lowerCAmelCase()
| 78 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
snake_case_ = logging.get_logger(__name__)  # pylint: disable=invalid-name
# NOTE(review): the usage-example string below is bound to the same name as the
# logger, clobbering it; upstream keeps these as `logger` and
# `EXAMPLE_DOC_STRING` — confirm before relying on either binding. The
# decorator on `__call__` below also references an unbound `lowercase_` where
# this docstring was presumably intended.
snake_case_ = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def _lowerCAmelCase(height, width, scale_factor=8):
    """Round pixel dimensions up to the latent grid and back to pixel space.

    Divides each dimension by ``scale_factor**2`` (rounding up), then rescales
    by ``scale_factor`` — yielding dimensions compatible with the VQ latent
    grid. The original signature repeated one parameter name (a SyntaxError)
    and never bound `new_height`/`new_width` before incrementing them.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def _lowerCAmelCase(pil_image, w=512, h=512):
    """Resize a PIL image and convert it to a batched CHW float tensor in [-1, 1].

    Args:
        pil_image: input PIL image (converted to RGB).
        w, h: target width and height in pixels.

    Returns:
        A ``(1, 3, h, w)`` float32 torch tensor normalized to [-1, 1].
    """
    # The original signature repeated one parameter name (a SyntaxError) and
    # the body read names that were never bound.
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert('RGB'))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class A_ ( SCREAMING_SNAKE_CASE_ ):
    """Image-to-image latent diffusion pipeline (Kandinsky 2.2 style): a
    conditional UNet denoiser driven by precomputed image embeddings, plus a
    MoVQ autoencoder for encoding the init image and decoding the result.

    NOTE(review): this block appears mechanically renamed — several method
    signatures below repeat the parameter name ``lowercase_`` (a SyntaxError as
    written) and many locals are bound to ``UpperCAmelCase`` while later lines
    read descriptive names. Confirm against the upstream diffusers
    implementation before relying on it.
    """

    def __init__( self :Dict , lowercase_ :UNetaDConditionModel , lowercase_ :DDPMScheduler , lowercase_ :VQModel , ) -> List[str]:
        super().__init__()
        # Register denoiser, scheduler and MoVQ so they move devices together.
        self.register_modules(
            unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , )
        # Pixel-to-latent downscale factor implied by the MoVQ channel pyramid.
        UpperCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    # Truncate the timestep schedule according to the img2img `strength`.
    def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[Any] , lowercase_ :Tuple , lowercase_ :Any ) -> Optional[int]:
        # get the original timestep using init_timestep
        UpperCAmelCase = min(int(num_inference_steps * strength ) , lowercase_ )
        UpperCAmelCase = max(num_inference_steps - init_timestep , 0 )
        UpperCAmelCase = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    # Encode the input image to MoVQ latents and noise them to the start step.
    def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Dict , lowercase_ :str , lowercase_ :Optional[Any] , lowercase_ :Union[str, Any] , lowercase_ :List[Any] , lowercase_ :Optional[Any] , lowercase_ :Any=None ) -> Any:
        if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}""" )
        UpperCAmelCase = image.to(device=lowercase_ , dtype=lowercase_ )
        UpperCAmelCase = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            # A 4-channel input already looks like latents; skip MoVQ encoding.
            UpperCAmelCase = image
        else:
            if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
                raise ValueError(
                    f"""You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch"""
                    f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
            elif isinstance(lowercase_ , lowercase_ ):
                # Per-sample generators: encode each image slice with its own RNG.
                UpperCAmelCase = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowercase_ )
                ]
                UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
            else:
                UpperCAmelCase = self.movq.encode(lowercase_ ).latent_dist.sample(lowercase_ )
            UpperCAmelCase = self.movq.config.scaling_factor * init_latents
        UpperCAmelCase = torch.cat([init_latents] , dim=0 )
        UpperCAmelCase = init_latents.shape
        UpperCAmelCase = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
        # get latents
        UpperCAmelCase = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ )
        UpperCAmelCase = init_latents
        return latents

    # Sequential offload: move sub-models to CPU, pulling each to GPU on use.
    def UpperCAmelCase__ ( self :int , lowercase_ :int=0 ) -> List[str]:
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" )
        UpperCAmelCase = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(lowercase_ , lowercase_ )

    # Model-level offload: keeps one sub-model on GPU at a time via hooks.
    def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :str=0 ) -> Dict:
        if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
        UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to('cpu' , silence_dtype_warnings=lowercase_ )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        UpperCAmelCase = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            UpperCAmelCase , UpperCAmelCase = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ )
        # We'll offload the last model manually.
        UpperCAmelCase = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def UpperCAmelCase__ ( self :List[Any] ) -> Dict:
        # Resolve the device where the UNet actually executes (offload-aware).
        if not hasattr(self.unet , '_hf_hook' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(lowercase_ , '_hf_hook' )
                and hasattr(module._hf_hook , 'execution_device' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    # Main denoising loop: classifier-free guidance over image embeddings,
    # then MoVQ decode and output-format conversion.
    # NOTE(review): the decorator argument `lowercase_` is unbound at class
    # scope — presumably the module-level example docstring was intended.
    @torch.no_grad()
    @replace_example_docstring(lowercase_ )
    def __call__( self :str , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :int = 5_12 , lowercase_ :int = 5_12 , lowercase_ :int = 1_00 , lowercase_ :float = 4.0 , lowercase_ :float = 0.3 , lowercase_ :int = 1 , lowercase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ :Optional[str] = "pil" , lowercase_ :bool = True , ) -> List[str]:
        UpperCAmelCase = self._execution_device
        # Guidance scale > 1 enables classifier-free guidance.
        UpperCAmelCase = guidance_scale > 1.0
        if isinstance(lowercase_ , lowercase_ ):
            UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
        UpperCAmelCase = image_embeds.shape[0]
        if isinstance(lowercase_ , lowercase_ ):
            UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
        if do_classifier_free_guidance:
            # Duplicate embeddings per requested image and stack [negative, positive].
            UpperCAmelCase = image_embeds.repeat_interleave(lowercase_ , dim=0 )
            UpperCAmelCase = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 )
            UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ )
        if not isinstance(lowercase_ , lowercase_ ):
            UpperCAmelCase = [image]
        if not all(isinstance(lowercase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                f"""Input is in incorrect format: {[type(lowercase_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
        UpperCAmelCase = torch.cat([prepare_image(lowercase_ , lowercase_ , lowercase_ ) for i in image] , dim=0 )
        UpperCAmelCase = image.to(dtype=image_embeds.dtype , device=lowercase_ )
        # Encode the init image into MoVQ latents and replicate per prompt.
        UpperCAmelCase = self.movq.encode(lowercase_ )['latents']
        UpperCAmelCase = latents.repeat_interleave(lowercase_ , dim=0 )
        self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
        UpperCAmelCase , UpperCAmelCase = self.get_timesteps(lowercase_ , lowercase_ , lowercase_ )
        UpperCAmelCase = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        UpperCAmelCase , UpperCAmelCase = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor )
        UpperCAmelCase = self.prepare_latents(
            lowercase_ , lowercase_ , lowercase_ , lowercase_ , image_embeds.dtype , lowercase_ , lowercase_ )
        for i, t in enumerate(self.progress_bar(lowercase_ ) ):
            # expand the latents if we are doing classifier free guidance
            UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            UpperCAmelCase = {'image_embeds': image_embeds}
            UpperCAmelCase = self.unet(
                sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
            if do_classifier_free_guidance:
                # Split unconditional/conditional halves and mix per guidance scale.
                UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
                UpperCAmelCase , UpperCAmelCase = noise_pred.chunk(2 )
                UpperCAmelCase , UpperCAmelCase = variance_pred.chunk(2 )
                UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                UpperCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , 'variance_type' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                # Scheduler doesn't consume the learned variance; drop it.
                UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            UpperCAmelCase = self.scheduler.step(
                lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0]
        # post-processing
        UpperCAmelCase = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            # Map the [-1, 1] decoder output into [0, 1] before conversion.
            UpperCAmelCase = image * 0.5 + 0.5
            UpperCAmelCase = image.clamp(0 , 1 )
            UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            UpperCAmelCase = self.numpy_to_pil(lowercase_ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=lowercase_ )
| 78 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class _lowerCAmelCase(BaseOutput):
    """Output of the IF pipelines.

    Attributes follow the IF pipeline-output convention: the generated images,
    plus optional per-image safety flags.
    """
    # The original subclassed an unbound name `a` (the imported `BaseOutput`
    # was clearly intended) and annotated the same field name three times,
    # leaving a single meaningless field.
    images: Union[List[PIL.Image.Image], np.ndarray]      # generated images
    nsfw_detected: Optional[List[bool]]                    # per-image NSFW flag
    watermark_detected: Optional[List[bool]]               # per-image watermark flag
# Optional heavy dependencies: export the real IF pipelines when both
# transformers and torch are importable, otherwise fall back to the dummy
# placeholder objects that raise a helpful error on use.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_imgaimg import IFImgaImgPipeline
    from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
| 254 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase(metaclass=DummyObject):
    """Placeholder that raises an informative error whenever it is constructed
    or its factory classmethods are called while `onnx` is not installed.

    The original used an unbound `a` as the metaclass (the imported
    `DummyObject` was clearly intended) and stored the backend list under an
    arbitrary attribute name; `DummyObject`-style placeholders conventionally
    read `_backends`.
    """
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['onnx'])

    # NOTE(review): the original defined two classmethods under the same name
    # (the second shadows the first) — upstream dummies expose distinct
    # factory names (e.g. from_config / from_pretrained); confirm before use.
    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ['onnx'])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ['onnx'])
| 254 | 1 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
a : Optional[int] = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
a : List[Any] = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def __magic_name__(cells) -> list[list[int]]:
    """Compute one Conway's Game of Life step for a 2-D grid of 0/1 ints.

    Edges do not wrap: out-of-grid neighbours count as dead. Returns a new
    grid; the input is not modified. (The original body read `cells` while the
    parameter was bound to an obfuscated name.)
    """
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            # Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def __magic_name__(cells, frames) -> list[Image.Image]:
    """Render ``frames`` successive generations of ``cells`` as PIL images.

    Live cells are drawn black, dead cells white. After each frame the board
    is advanced one generation.

    NOTE(review): the step function is the generation helper defined above,
    which the obfuscation also renamed — `new_generation` must be bound for
    this to run.
    """
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new('RGB', (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    # Demo: render 16 generations of the glider seed and write an animated GIF.
    # NOTE(review): as written, neither `generate_images` nor `GLIDER` is bound
    # in this module (the generator above is named `__magic_name__` and the
    # seed board `a`), and the result is bound to `a` while `images` is read on
    # the next line — confirm the intended names before running directly.
    a : Dict = generate_images(GLIDER, 16)
    images[0].save('out.gif', save_all=True, append_images=images[1:])
| 56 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# Import the slow (sentencepiece-based) tokenizer when available; otherwise
# keep the name defined as None so the fast-tokenizer class attribute that
# references it below stays valid.
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    A__ : Dict = None
    XLNetTokenizer = None
A__ : List[Any] = logging.get_logger(__name__)
A__ : str = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
A__ : Union[str, Any] = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
},
}
A__ : Any = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
A__ : Dict = """▁"""
# Segments (not really needed)
A__ : List[str] = 0
A__ : List[Any] = 1
A__ : Union[str, Any] = 2
A__ : List[Any] = 3
A__ : str = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) XLNet tokenizer based on SentencePiece.

    Special-token layout follows XLNet: sequences end with ``<sep> <cls>``
    and padding is applied on the left.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        # XLNet uses segment id 3 for padding positions.
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # The slow tokenizer can only be re-created when the SentencePiece model is present.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs: ``X <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Create token-type ids: segment 0 for A, 1 for B, and 2 for ``<cls>``."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Copy the SentencePiece model into ``save_directory`` and return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        # Avoid copying the file onto itself.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 185 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    """Entry point for the ``diffusers-cli`` command-line tool."""
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    # A subcommand sets ``func`` via ``set_defaults``; without one, show help.
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    """Tool that transcribes audio to text with a Whisper checkpoint."""

    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        # Convert raw audio into Whisper input features.
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        # Autoregressive generation of token ids from the features.
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        # Decode the first (and only) sequence back to plain text.
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job: dict) -> dict:
    """Extract start/end timestamps and duration (in minutes) from one GitHub Actions job dict."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
def get_job_time(workflow_run_id: str, token=None) -> dict:
    """Fetch all jobs of a workflow run from the GitHub API and return their timing info.

    Returns a mapping ``job name -> {"started_at", "completed_at", "duration"}``;
    an empty dict on any request/parsing failure (best-effort by design).
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        # The first request returned up to 100 jobs; page through the rest.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    # Sort jobs by duration, longest first.
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
| 242 |
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations of elements of ``array`` (with repetition) summing to ``target``.

    Naive top-down recursion (exponential time). ``n`` is the length of ``array``
    and is kept only for signature compatibility with the other variants.
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1  # one way to reach exactly zero: take nothing
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_with_dp_array(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations summing to ``target``, memoized top-down (O(target * n)).

    ``n`` is the length of ``array``; kept for signature compatibility.
    """

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:  # already computed
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations summing to ``target``, bottom-up DP in O(target * n).

    ``dp_array[i]`` holds the number of ordered ways to reach sum ``i``.
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # base case: one way to make 0

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 242 | 1 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    """Factory used by argparse ``set_defaults(func=...)`` to build the command from parsed args."""
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class DownloadCommand(BaseTransformersCLICommand):
    """CLI command that pre-downloads a model and its tokenizer into the local cache."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach the ``download`` subparser and its arguments to the root parser."""
        download_parser = parser.add_parser('download' )
        download_parser.add_argument(
            '--cache-dir' , type=str , default=None , help='Path to location to store the models' )
        download_parser.add_argument(
            '--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' )
        download_parser.add_argument(
            '--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
        download_parser.add_argument('model' , type=str , help='Name of the model to download' )
        download_parser.set_defaults(func=download_command_factory )

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        """Download model and tokenizer weights/configs via the auto classes."""
        # Imported lazily so the CLI loads fast when this command is unused.
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 358 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build (old_name, new_name) pairs mapping original DiT/BEiT weights to HF names.

    Args:
        config: model config; only ``num_hidden_layers`` is read here.
        has_lm_head: include mask-token/layernorm keys (pretraining head) instead of
            the pooler + classifier keys.
        is_semantic: original checkpoint prefixes weights with ``backbone.``.
    """
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each layer's fused qkv projection into separate HF query/key/value entries.

    Mutates ``state_dict`` in place: pops the original fused weights/biases and
    inserts the per-head entries plus the ``gamma_1``/``gamma_2`` scaling vectors
    (stored as ``lambda_*`` so ``from_pretrained`` does not rename them).
    """
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values (key has no bias in BEiT)
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO cats test image used to sanity-check the converted model."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a DiT checkpoint from the original repo to a HuggingFace BEiT model.

    Downloads the weights from ``checkpoint_url``, renames/splits them into the
    HF layout, verifies the output logits shape on a test image, and saves (and
    optionally pushes) the model plus its image processor.
    """
    # "rvlcdip" checkpoints are classification fine-tunes; others keep the MIM head.
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 51 | 0 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    """2x nearest-neighbour upsampling followed by a 3x3 same-padding convolution."""

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # NHWC layout: double the spatial dimensions, keep batch and channels.
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxDownsample2D(nn.Module):
    """2x spatial downsampling via a strided 3x3 convolution."""

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxResnetBlock2D(nn.Module):
    """Residual block: two GroupNorm/SiLU/Conv stages with a time-embedding injection.

    When in/out channel counts differ (or ``use_nin_shortcut`` forces it), the
    residual branch goes through a 1x1 convolution before the addition.
    """

    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # Project the (SiLU-activated) time embedding and broadcast over H and W.
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
| 30 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = False, False, False
@dataclass
class Audio:
    """Audio feature: extract audio data (array + sampling rate) from an audio file.

    Accepts a path (``str``), raw bytes, or a dict with ``path``/``bytes``/
    ``array``+``sampling_rate`` keys. Decoding yields
    ``{"path", "array", "sampling_rate"}``.
    """

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        """Encode an example into a ``{"bytes", "path"}`` dict for Arrow storage."""
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32_767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32_767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id: Optional[dict] = None) -> dict:
        """Decode a stored example into ``{"path", "array", "sampling_rate"}``.

        ``token_per_repo_id`` maps Hub repo ids to auth tokens for private remote files.
        """
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. "
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. "
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        # soundfile returns (frames, channels); transpose to channels-first for librosa.
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten into raw ``bytes``/``path`` columns (only valid when not decoding)."""
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage) -> pa.StructArray:
        """Cast string/binary/struct Arrow storage into the canonical bytes+path struct."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            # Raw array+sampling_rate rows: re-encode each one to wav bytes.
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage) -> pa.StructArray:
        """Read local files into bytes so the table is self-contained; keep only basenames."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
| 46 | 0 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
_snake_case = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for loading JSON / JSON Lines files."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    # When set, the JSON file is one object and this field holds the list of items.
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class UpperCAmelCase_(datasets.ArrowBasedBuilder):
    """Arrow-based builder that loads JSON and JSON-Lines files into Arrow tables.

    BUG FIX: every method was named ``snake_case__`` (each def overwrote the
    previous one) and every local was bound to the same throwaway name, leaving
    ``splits``, ``block_size``, ``batch_idx`` etc. undefined. Method names are
    restored from the builder API and from the body's own ``self._cast_table``
    call.
    """

    # NOTE: at this point in the module `UpperCAmelCase_` still refers to the JSON
    # config dataclass defined above (this class's own name is not bound until the
    # class body finishes executing), so this wires the builder to its config.
    BUILDER_CONFIG_CLASS = UpperCAmelCase_

    def _info(self):
        """Validate deprecated config options and describe the dataset."""
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            # BUG FIX: the deprecated value was dropped into a throwaway local;
            # it must be carried over into `chunksize`.
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.")
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Download/extract the configured data files and emit one SplitGenerator per split."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        """Cast a table to the configured features, appending missing columns as nulls."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                column_type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=column_type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield (key, pa.Table) pairs read from every input file."""
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.")
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. ") from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
| 300 |
class UpperCAmelCase_:
    """Undirected weighted graph stored as a nested adjacency dict.

    BUG FIX: every attribute and local was bound to the same throwaway name
    (``self.adjacency`` was never assigned) and every method was named
    ``snake_case__``; real names are restored from the call sites
    (``self.add_vertex``, ``self.get_edges``, ``g.add_edge`` …). The duplicate
    parameter names in ``build`` were a SyntaxError.
    """

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        # adjacency[head][tail] == weight, stored in both directions
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Register a vertex if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected edge; self-loops are ignored."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Bump duplicate edge weights so every edge weight is unique."""
        edges = self.get_edges()
        # Drop the reversed duplicate of each undirected edge.
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Return all directed (tail, head, weight) triples (both directions)."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Construct a graph from vertex and (head, tail, weight) edge lists."""
        g = UpperCAmelCase_()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g


# Conventional name; later code references `Graph`.
Graph = UpperCAmelCase_
class UpperCAmelCase_:
    """Disjoint-set (union-find) with path compression and union by rank,
    plus Borůvka's minimum-spanning-tree algorithm.

    BUG FIX: ``self.parent`` / ``self.rank`` were never assigned (every local
    collapsed to one name), the methods all shared the name ``snake_case__``,
    and ``boruvka`` referenced an undefined ``Graph`` name.
    """

    def __init__(self):
        self.parent = {}
        self.rank = {}

    def __len__(self):
        return len(self.parent)

    def make_set(self, item):
        """Create a singleton set for *item* (no-op if it already exists)."""
        if item in self.parent:
            return self.find(item)
        self.parent[item] = item
        self.rank[item] = 0
        return item

    def find(self, item):
        """Return the representative of *item*'s set, compressing paths."""
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]

    def union(self, item1, item2):
        """Merge the sets of the two items; return the surviving root."""
        root1 = self.find(item1)
        root2 = self.find(item2)
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2
        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1
        return None

    @staticmethod
    def boruvka(graph):
        """Compute a minimum spanning tree of *graph* with Borůvka's algorithm."""
        num_components = graph.num_vertices
        union_find = UpperCAmelCase_()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            # Drop the reversed duplicate of each undirected edge.
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        # BUG FIX: the original referenced an undefined `Graph` name; build the
        # MST through the class of the graph that was passed in.
        mst = type(graph).build(edges=mst_edges)
        return mst
| 300 | 1 |
import os
from datetime import datetime as dt
from github import Github
# Issues carrying any of these labels are exempt from the stalebot sweep below.
# BUG FIX: the list was bound only to an obfuscated name while the code reads
# `LABELS_TO_EXEMPT`; keep the old binding as an alias.
LABELS_TO_EXEMPT = lowerCamelCase_ = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]
def main():
    """Stalebot for huggingface/diffusers: close, reopen, or flag stale issues.

    BUG FIX: the function was named ``__magic_name__`` while the entry point
    calls ``main()``; the comment-sort lambda bound one name and read another;
    and the ``reverse``/``len`` arguments referenced undefined names.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first.
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored.")
            issue.add_to_labels("stale")


# Backward-compatible alias for the original (obfuscated) name.
__magic_name__ = main


if __name__ == "__main__":
    main()
| 244 |
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """Sort a list of integers in place using pigeonhole sort.

    BUG FIX: the parameter was named one thing and the body iterated another
    (``for x in a``), every local collapsed to one name, and the sorted values
    were never written back into the input list. Raises AssertionError if any
    element is not an int; undefined behavior (min/max of empty) for [].
    """
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


# Backward-compatible alias for the original (obfuscated) name.
a__ = pigeonhole_sort
def main():
    """Demo: pigeonhole-sort a sample list and print the result.

    BUG FIX: the guard below called an undefined ``main`` and the original
    joined integers directly, which raises TypeError in str.join.
    """
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
| 181 | 0 |
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class a__(ModelMixin, ConfigMixin):
    """unCLIP text-projection model: projects CLIP image embeddings into the
    additive time embeddings and extra context tokens consumed by the decoder.

    BUG FIX: the base classes referenced an undefined ``UpperCAmelCase__`` name
    (the file imports ``ModelMixin``/``ConfigMixin`` for exactly this), the
    submodules were never assigned to ``self``, and the forward parameters were
    all named ``a`` while the body read the real names.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        # Learned embedding substituted for the image embedding under
        # classifier-free guidance.
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim)
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        """Return (text_encoder_hidden_states, additive_clip_time_embeddings)."""
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1)
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
| 237 | '''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class a__(unittest.TestCase):
    """Unit tests for the generation stopping criteria.

    BUG FIX: every method shared the name ``SCREAMING_SNAKE_CASE__`` (later defs
    overwrote earlier ones, so unittest would discover none of them) while the
    bodies call ``self._get_tensors``; names are restored from those call sites.
    ``assertWarns`` received an undefined name instead of ``UserWarning``.
    """

    def _get_tensors(self, length):
        # Build dummy (input_ids, scores) pairs of the requested sequence length.
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ])
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        # A start timestamp in the past makes the criterion fire immediately.
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
| 237 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# BUG FIX: the logger was bound to a throwaway name; the conversion code below
# logs through `logger`. Keep the old binding as an alias.
logger = lowerCAmelCase__ = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Rewrite ``<block>.<layer>.<original_name>`` inside *key* to
    ``block.<block - offset>.<layer>.<new_name>``.

    BUG FIX: all four parameters shared the name ``a__`` (a SyntaxError) and
    the body referenced names it never bound; names are restored from the call
    sites in ``rename_keys`` below.
    """
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(
        f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key


def rename_keys(state_dict):
    """Map original PoolFormer state-dict keys to transformers naming."""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Download the standard COCO cats test image used to sanity-check the conversion.

    BUG FIX: the function was bound to an obfuscated name while the conversion
    code calls ``prepare_img()``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Convert an original PoolFormer checkpoint to the transformers format,
    verify its logits on a test image, and save model + image processor.

    BUG FIX: all three parameters shared the same obfuscated name (a
    SyntaxError) and every local collapsed into one name, so the config was
    never populated and the checkpoint never loaded.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # BUG FIX: parser and args were both bound to the same obfuscated name.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 329 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# BUG FIX: the logger and the archive map were bound to the same obfuscated
# name, so the second assignment clobbered the first.
logger = logging.get_logger(__name__)

# Map of canonical checkpoint names to their hosted config files.
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = lowerCAmelCase__ = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}
class __a(PretrainedConfig):
    """Configuration class for the OpenAI GPT model.

    BUG FIX: the base class referenced an undefined ``UpperCAmelCase`` name
    (the file imports ``PretrainedConfig`` for exactly this), both class
    attributes collided on the name ``_a``, and every ``__init__`` parameter
    shared one name (a SyntaxError). Names follow the PretrainedConfig
    convention (``model_type`` / ``attribute_map``).
    """

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        """Store the hyper-parameters and forward the rest to PretrainedConfig."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 329 | 1 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__(
    self,
    parent,
    batch_size=13,
    seq_length=7,
    is_training=True,
    use_input_mask=True,
    use_token_type_ids=True,
    use_labels=True,
    vocab_size=99,
    hidden_size=32,
    num_hidden_layers=5,
    num_attention_heads=4,
    intermediate_size=37,
    hidden_act="gelu",
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    max_position_embeddings=128,
    max_relative_position=32,  # presumably Nezha's relative-position window — accepted but not stored, TODO confirm
    type_vocab_size=16,
    type_sequence_label_size=2,
    initializer_range=0.02,
    num_labels=3,
    num_choices=4,
    scope=None,
):
    """Store the tester hyper-parameters.

    BUG FIX: every attribute was bound to the same throwaway ``A_`` name, so
    none of the ``self.*`` fields read by the other methods ever existed.
    """
    self.parent = parent
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.is_training = is_training
    self.use_input_mask = use_input_mask
    self.use_token_type_ids = use_token_type_ids
    self.use_labels = use_labels
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    self.num_labels = num_labels
    self.num_choices = num_choices
    self.scope = scope
def prepare_config_and_inputs(self):
    """Build a config plus random input tensors for the model tests.

    BUG FIX: every tensor was bound to the same throwaway ``A_`` name, so the
    return statement referenced names that never existed. Real names restored
    from the downstream ``create_and_check_*`` signatures.
    """
    input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

    input_mask = None
    if self.use_input_mask:
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

    token_type_ids = None
    if self.use_token_type_ids:
        token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

    sequence_labels = None
    token_labels = None
    choice_labels = None
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        choice_labels = ids_tensor([self.batch_size], self.num_choices)

    config = self.get_config()

    return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
    """Return a NezhaConfig built from the tester hyper-parameters.

    BUG FIX: restored the method name the sibling code calls
    (``self.get_config()``) and replaced the undefined ``__A`` placeholder
    with ``False`` for ``is_decoder``.
    """
    return NezhaConfig(
        vocab_size=self.vocab_size,
        hidden_size=self.hidden_size,
        num_hidden_layers=self.num_hidden_layers,
        num_attention_heads=self.num_attention_heads,
        intermediate_size=self.intermediate_size,
        hidden_act=self.hidden_act,
        hidden_dropout_prob=self.hidden_dropout_prob,
        attention_probs_dropout_prob=self.attention_probs_dropout_prob,
        max_position_embeddings=self.max_position_embeddings,
        type_vocab_size=self.type_vocab_size,
        is_decoder=False,
        initializer_range=self.initializer_range,
    )
def prepare_config_and_inputs_for_decoder(self):
    """Like prepare_config_and_inputs, plus encoder tensors for decoder tests.

    BUG FIX: the unpacked tuple and both encoder tensors were bound to a
    single throwaway name, so the return referenced undefined names.
    """
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = self.prepare_config_and_inputs()

    config.is_decoder = True
    encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
    encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

    return (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    )
def create_and_check_model(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Run NezhaModel with and without masks and check output shapes."""
    model = NezhaModel(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
    result = model(input_ids, token_type_ids=token_type_ids)
    result = model(input_ids)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_model_as_decoder(
    self,
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
    encoder_hidden_states,
    encoder_attention_mask,
):
    """Run NezhaModel as a decoder with cross-attention and check output shapes."""
    # BUG FIX: the flag was dropped into a throwaway local; it must be set on config.
    config.add_cross_attention = True
    model = NezhaModel(config)
    model.to(torch_device)
    model.eval()
    result = model(
        input_ids,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_attention_mask,
    )
    result = model(
        input_ids,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        encoder_hidden_states=encoder_hidden_states,
    )
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_masked_lm(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Run NezhaForMaskedLM and check the logits shape."""
    model = NezhaForMaskedLM(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_next_sequence_prediction(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Run NezhaForNextSentencePrediction and check the binary-logits shape."""
    model = NezhaForNextSentencePrediction(config=config)
    model.to(torch_device)
    model.eval()
    result = model(
        input_ids,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        labels=sequence_labels,
    )
    self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def create_and_check_for_pretraining(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check NezhaForPreTraining's two heads: MLM logits and NSP logits."""
    model = NezhaForPreTraining(config=config)
    model.to(torch_device)
    model.eval()
    result = model(
        input_ids,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        labels=token_labels,
        next_sentence_label=sequence_labels,
    )
    self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def create_and_check_for_question_answering(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check NezhaForQuestionAnswering emits per-token start/end span logits."""
    model = NezhaForQuestionAnswering(config=config)
    model.to(torch_device)
    model.eval()
    result = model(
        input_ids,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        start_positions=sequence_labels,
        end_positions=sequence_labels,
    )
    self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
    self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check NezhaForSequenceClassification returns one logit row per example."""
    # The head size is configured through the config, not the constructor.
    config.num_labels = self.num_labels
    model = NezhaForSequenceClassification(config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check NezhaForTokenClassification returns per-token label logits."""
    config.num_labels = self.num_labels
    model = NezhaForTokenClassification(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check NezhaForMultipleChoice scores each of ``num_choices`` candidates.

    Inputs are tiled along a new choice dimension:
    (batch, seq) -> (batch, num_choices, seq).
    """
    config.num_choices = self.num_choices
    model = NezhaForMultipleChoice(config=config)
    model.to(torch_device)
    model.eval()
    multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    result = model(
        multiple_choice_inputs_ids,
        attention_mask=multiple_choice_input_mask,
        token_type_ids=multiple_choice_token_type_ids,
        labels=choice_labels,
    )
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
    """Return ``(config, inputs_dict)`` for the shared ModelTesterMixin tests.

    Restores the seven-way tuple unpacking that the obfuscated original had
    collapsed into a single placeholder assignment, leaving the names used in
    the dict below undefined.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model test suite for the Nezha architecture.

    The obfuscated original named every class attribute ``lowercase`` and every
    method ``snake_case_`` (later definitions shadow earlier ones, so only one
    of each survived), and referenced the undefined placeholder ``__A``.
    Canonical attribute names are required by the mixins; method names are
    restored to the standard ``test_*`` layout.
    NOTE(review): the mixin bases and ``MODEL_FOR_PRETRAINING_MAPPING`` /
    ``NezhaConfig`` / ``NezhaModelTester`` / ``require_torch_gpu`` / ``tempfile``
    / ``os`` are assumed to be imported at the top of this file — confirm.
    """

    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # Pretraining models additionally require `labels` and
        # `next_sentence_label` when labels are requested.
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the published ``sijunhe/nezha-cn-base`` checkpoint.

    Restores distinct test-method names (both were obfuscated to ``snake_case_``,
    so the second definition shadowed the first) and replaces the undefined
    ``__A`` placeholders with the tensors they clearly refer to.
    """

    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
# ---- dataset-concatenation artifact removed (markdown table row "| 361 |"); file boundary ----
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    """Builds small LayoutLMv3 configs/inputs and shape-checks each model head.

    Restored from the obfuscated original: the ``__init__`` header repeated
    ``UpperCamelCase__`` for every parameter (a SyntaxError) while the body
    already read the canonical names (``parent``, ``batch_size``, ...), and
    every ``self.x = x`` assignment had its left-hand side replaced by ``A_``.
    The class name matches the existing ``LayoutLMvaModelTester(self)`` call
    in the test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        """Create random ids/bbox/pixel inputs plus an appropriately small config."""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: x0 <= x1 and y0 <= y1 for every box.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Shape-check the base model for text+image, text-only, and image-only inputs."""
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Shape-check the sequence-classification head."""
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Shape-check the token-classification head (text tokens only)."""
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Shape-check the extractive-QA head's start/end logits."""
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` for the shared ModelTesterMixin tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model test suite for LayoutLMv3.

    The obfuscated original declared three class attributes all named
    ``lowercase`` (so only the last survived) and every method as
    ``snake_case_``; canonical names are restored so the mixins can find
    ``all_model_classes`` / ``pipeline_model_mapping`` and unittest can
    discover each ``test_*`` method.
    NOTE(review): the two mixin bases and the flag names (``test_pruning``,
    ``test_torchscript``, ``test_mismatched_shapes``) were reconstructed from
    the standard transformers layout — confirm against the file's imports.
    """

    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True

    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Tile inputs for multiple-choice models and inject dummy labels per head type."""
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load and return the COCO sample image used by the integration tests below.

    Restores the canonical name: the original def was obfuscated to
    ``UpperCAmelCase__`` but is called as ``prepare_img()`` in
    ``test_inference_no_head``, and its body assigned to a placeholder while
    returning the undefined name ``image``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the published ``microsoft/layoutlmv3-base`` checkpoint."""

    @cached_property
    def default_image_processor(self):
        # apply_ocr=False: the test supplies its own input_ids/bbox rather than
        # OCR'd text (the obfuscated original passed an undefined placeholder here).
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
# ---- dataset-concatenation artifact removed (markdown table row "| 101 | 0 |"); file boundary ----
"""simple docstring"""
# Capacity matrix of the example flow network used by the __main__ demo below:
# test_graph[u][v] is the edge capacity from node u to node v (6 nodes,
# source = 0, sink = 5). The obfuscated original bound this to `_a`, leaving
# the `test_graph` reference in the demo undefined.
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    """Breadth-first search over positive-capacity edges of ``graph``.

    Fills ``parent`` (mutated in place) with the BFS predecessor of each node
    reached from ``s`` and returns True iff the sink ``t`` is reachable.
    Restored from the obfuscated original, which named this function ``_A``
    (while ``mincut`` calls it as ``bfs``) and collapsed the distinct locals
    ``visited``/``queue``/``u`` into one placeholder.
    """
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph, source, sink):
    """Return the edges crossing a minimum s-t cut of ``graph``.

    Runs Ford-Fulkerson with BFS augmenting paths (Edmonds-Karp), saturating
    ``graph`` in place, then reports every edge that had positive capacity in
    the original matrix (``temp``) but is fully saturated in the residual.
    Restored from the obfuscated original, which named this function ``_A``
    (shadowing ``bfs``, which was also named ``_A``) while the demo calls
    ``mincut(...)``.
    """
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
    # Demo: print the min-cut edges of the module-level example network
    # (source node 0, sink node 5).
    print(mincut(test_graph, source=0, sink=5))
# ---- dataset-concatenation artifact removed (markdown table row "| 17 |"); file boundary ----
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
# Generic element type for the stack; both classes below subscript Generic[T],
# so the TypeVar must be bound to the name T (the obfuscated original bound it
# to `_UpperCamelCase`, leaving T undefined, and annotated it with the
# un-imported name `Any`).
T = TypeVar("T")
class Node(Generic[T]):
    """A singly linked node holding one stack element.

    Restored name: the obfuscated original called this class ``a`` (clashing
    with the stack class, also ``a``), while the stack's ``push`` instantiates
    ``Node(...)``.
    """

    def __init__(self, data: T):
        self.data = data  # the stored element
        self.next = None  # next node toward the bottom of the stack

    def __str__(self) -> str:
        return f"{self.data}"
class LinkedStack(Generic[T]):
    """LIFO stack backed by a singly linked list of ``Node`` objects.

    Restored from the obfuscated original, in which every method was named
    ``UpperCamelCase_`` (so later defs shadowed earlier ones), ``__str__``
    stringified an undefined placeholder instead of each item, ``push`` never
    linked the new node to the old top, and ``pop``'s isinstance check
    referenced an undefined name instead of ``Node``.
    """

    def __init__(self):
        self.top = None  # topmost Node, or None when empty

    def __iter__(self) -> Iterator[T]:
        # Yield elements from top to bottom.
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self):
        return "->".join([str(item) for item in self])

    def __len__(self):
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        """Place ``item`` on top of the stack."""
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        """Remove and return the top element; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        """Return (without removing) the top element; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        """Drop every element."""
        self.top = None
if __name__ == "__main__":
    # Run any doctests embedded in this module's docstrings.
    from doctest import testmod

    testmod()
# ---- dataset-concatenation artifact removed (markdown table row "| 220 | 0 |"); file boundary ----
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    """Builds small OpenLlama configs/inputs and shape-checks the model heads.

    Restored from the obfuscated original: every ``def`` header repeated the
    same parameter name (a SyntaxError) and bodies referenced the undefined
    placeholder ``lowercase_``. The class name matches the existing
    ``OpenLlamaModelTester(self)`` call in the test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/mask/label tensors plus a small config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # NOTE(review): is_decoder=False and use_stable_embedding=True follow
        # the canonical open-llama test; the obfuscated original passed an
        # undefined placeholder for both — confirm.
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Shape-check the base model with and without an attention mask."""
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Shape-check the model as a cross-attending decoder."""
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Shape-check the causal-LM head's vocabulary logits."""
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Verify cached (past_key_values) decoding matches a full forward pass."""
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` for the shared ModelTesterMixin tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __snake_case ( lowercase__ , unittest.TestCase):
    """Model tests for the Open-Llama architecture.

    NOTE(review): the obfuscated original listed the same base (``lowercase__``)
    three times, which raises ``TypeError: duplicate base class`` at class
    creation — it is listed once here (upstream uses the common ModelTester /
    Generation / Pipeline mixins; confirm against this file's imports).  The
    seven class attributes and nine methods all shared a single obfuscated name
    and silently shadowed each other; the names the HF test framework actually
    reads are restored.  ``torch_device``, ``OpenLlamaConfig``, ``parameterized``
    and ``set_seed`` are assumed to be imported at the top of the file — verify.
    """

    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        # model_tester/config_tester are read by every test method below; the
        # obfuscated original assigned them to locals, leaving the attributes unset.
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # config is the first element of the prepared inputs
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''')
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([('''linear''',), ('''dynamic''',)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 351 |
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
lowerCAmelCase__ = logging.getLogger(__name__)
class __snake_case ( _lowercase):
    """RAG retriever that runs the index only on the main worker and moves
    queries/results between workers with ``torch.distributed`` gather/scatter
    over a dedicated ``gloo`` process group.

    NOTE(review): the obfuscated original gave all four ``__init__`` parameters
    the same name (a SyntaxError) and named every method ``SCREAMING_SNAKE_CASE``
    while the bodies call ``_is_main``/``_scattered``/``_infer_socket_ifname``;
    the names the bodies themselves use are restored.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,  # retrieval is initialised lazily in init_retrieval()
        )
        # process group dedicated to the retrieval gather/scatter traffic
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        """Create the ``gloo`` retrieval group and initialise the index on the main worker."""
        logger.info('''initializing retrieval''')
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info('''dist initialized''')
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend='''gloo''')
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info('''dist not initialized / main''')
            self.index.init_index()
        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        # rank 0 of the retrieval group owns the index
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        """Receive this worker's shard of ``scatter_list`` from rank 0."""
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith('''e''')), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int):
        """Gather queries on the main worker, retrieve, and scatter results back.

        Returns ``(retrieved_doc_embeds, doc_ids, doc_dicts)`` as numpy arrays /
        list of dicts for this worker's queries.
        """
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
        # distributed training
        world_size = dist.get_world_size(group=self.process_group)
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
| 175 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build the (old_name, new_name) pairs mapping a timm/DINO ViT state dict
    onto the HuggingFace ViT naming scheme.

    Args:
        config: ViT config; only ``num_hidden_layers`` is read here.
        base_model: if True, target a bare ``ViTModel`` (no ``vit.`` prefix, no
            classification head).
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight'''))
        rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias'''))
        rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight'''))
        rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias'''))
        rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight'''))
        rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias'''))
        rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight'''))
        rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias'''))
        rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight'''))
        rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias'''))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ] )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ] )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ] )
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each timm-style fused ``qkv`` projection into the separate
    query/key/value tensors HF ViT expects, mutating ``state_dict`` in place.

    NOTE(review): the obfuscated original lost the assignment targets; the
    ``{prefix}encoder.layer.{i}.attention.attention.{query,key,value}`` keys are
    restored from the HF ViT parameter naming convention — confirm against the
    model's state dict.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the DINO classification head entries from ``state_dict`` in place.

    Uses ``pop(k, None)`` so missing keys are ignored (the obfuscated original
    passed the key itself as the default, masking the intent).
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move the value stored under ``old`` to ``new`` in ``dct`` (in place)."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO test image (two cats) used to verify conversions.

    NOTE(review): ``stream=True`` restored from the usual requests+PIL pattern —
    the obfuscated original had an undefined placeholder there.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak a DINO checkpoint from torch.hub into the HF ViT structure,
    verify the outputs match the original model, and save model + image processor.

    NOTE(review): identifiers were restored from the helper call sites in this
    file; all numeric hyper-parameters and tolerances are kept from the original
    lines.
    """
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='dino_vitb16',
        type=str,
        help='Name of the model trained with DINO you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--base_model',
        action='store_true',
        help='Whether to only convert the base model (no projection head weights).',
    )
    # base_model defaults to True even though the flag is store_true
    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
"""simple docstring"""
from manim import *
class lowercase_ ( __lowerCAmelCase ):
    '''Manim scene animating big-model inference: model weights shuttle between
    CPU/GPU/Disk blocks while an input square travels through the model layers.

    NOTE(review): this file is identifier-obfuscated — every local is bound to
    ``_A`` (each assignment overwrites the previous) and ``_UpperCAmelCase``
    stands in for many different original arguments (manim directions like
    UP/DOWN/RIGHT and colours like YELLOW/BLUE).  The placeholders are left
    untouched; the code will not run as-is and must be restored from the
    un-obfuscated upstream before use.
    '''

    def lowerCAmelCase_ ( self : Dict ):
        """Build the key/CPU/GPU/Model/Disk layout, then animate one forward pass."""
        # basic building blocks: a memory cell, its fill, and a smaller "meta" cell
        _A = Rectangle(height=0.5 , width=0.5 )
        _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        _A = Rectangle(height=0.25 , width=0.25 )
        # CPU: two columns of 6 cells with a label
        _A = [mem.copy() for i in range(6 )]
        _A = [mem.copy() for i in range(6 )]
        _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
        _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
        _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
        _A = Text('CPU' , font_size=24 )
        _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(_UpperCAmelCase )
        # GPU: a row of 4 cells
        _A = [mem.copy() for i in range(4 )]
        _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
        _A = Text('GPU' , font_size=24 )
        _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
        gpu.move_to([-1, -1, 0] )
        self.add(_UpperCAmelCase )
        # Model: a row of 6 layer cells
        _A = [mem.copy() for i in range(6 )]
        _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
        _A = Text('Model' , font_size=24 )
        _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
        model.move_to([3, -1.0, 0] )
        self.add(_UpperCAmelCase )
        # per-layer fill rectangles, mirrored on the CPU side
        _A = []
        _A = []
        for i, rect in enumerate(_UpperCAmelCase ):
            _A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 )
            target.move_to(_UpperCAmelCase )
            model_arr.append(_UpperCAmelCase )
            _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 )
            cpu_target.move_to(cpu_left_col_base[i] )
            model_cpu_arr.append(_UpperCAmelCase )
        self.add(*_UpperCAmelCase , *_UpperCAmelCase )
        # Disk: two columns of smaller meta cells
        _A = [meta_mem.copy() for i in range(6 )]
        _A = [meta_mem.copy() for i in range(6 )]
        _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
        _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
        _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
        _A = Text('Disk' , font_size=24 )
        _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
        disk.move_to([-4, -1.25, 0] )
        self.add(_UpperCAmelCase , _UpperCAmelCase )
        # legend square + key text
        _A = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        _A = MarkupText(
            F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(_UpperCAmelCase , _UpperCAmelCase )
        _A = MarkupText(
            F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
        blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(_UpperCAmelCase )
        # narration: introduce the animation
        _A = MarkupText(
            F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(_UpperCAmelCase ) )
        # the travelling input token
        _A = Square(0.3 )
        input.set_fill(_UpperCAmelCase , opacity=1.0 )
        input.set_stroke(width=0.0 )
        input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 )
        self.play(Write(_UpperCAmelCase ) )
        input.generate_target()
        input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 )
        self.play(MoveToTarget(_UpperCAmelCase ) )
        self.play(FadeOut(_UpperCAmelCase ) )
        _A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 )
        a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 )
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0] )
        # narration: explain the CPU<->GPU hook
        _A = MarkupText(
            F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(_UpperCAmelCase , run_time=3 ) )
        _A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
        self.play(
            Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
        self.play(MoveToTarget(model_cpu_arr[0] ) )
        _A = a.copy()
        # walk the input through the six model layers, swapping weights in/out
        for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 )
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02 )
            _A = AnimationGroup(
                FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
            self.play(_UpperCAmelCase )
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
                if i >= 1:
                    _A = 0.7
                self.play(
                    Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
            else:
                # last layer: move its weights back and slide the input out
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
                self.play(
                    Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
                self.play(MoveToTarget(model_cpu_arr[i] ) )
        _A = a_c
        _A = a_c.copy()
        input.generate_target()
        input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
        self.play(
            FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , )
        # narration: wrap up
        _A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) )
        self.wait()
| 315 | 0 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
UpperCAmelCase = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    """Infer the RAG model family from its name/path; ``None`` if unrecognised.

    Name restored from the call site later in this file
    (``args.model_type = infer_model_type(args.model_name_or_path)``).
    """
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Best score of ``prediction`` against any ground truth (the obfuscated
    original called the metric on two undefined placeholders and never used
    ``gt``)."""
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    """Compute and log EM / F1 of the predictions in ``preds_path`` against the
    gold answers in ``gold_data_path``.

    ``args.gold_data_mode == "qa"`` expects a TSV ``question \\t answer_list``
    file; otherwise one gold answer string per line.  Function name restored
    from the dispatch later in this file (``get_scores if args.eval_mode ==
    "e2e" ...``).
    """
    hypos = [line.strip() for line in open(preds_path, """r""").readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="""\t""", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, """r""").readlines()]
        answers = [[reference] for reference in references]
    fa = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        fa += metric_max_over_ground_truths(fa_score, prediction, ground_truths)
    em = 100.0 * em / total
    fa = 100.0 * fa / total
    logger.info(f'F1: {fa:.2f}')
    logger.info(f'EM: {em:.2f}')
def get_precision_at_k(args, preds_path, gold_data_path):
    """Compute and log precision@k of the tab-separated provenance strings in
    ``preds_path`` against those in ``gold_data_path`` (k = ``args.k``)."""
    k = args.k
    hypos = [line.strip() for line in open(preds_path, """r""").readlines()]
    references = [line.strip() for line in open(gold_data_path, """r""").readlines()]
    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("""\t""")[:k])
        ref_provenance = set(reference.split("""\t"""))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k
    em = 100.0 * em / total
    logger.info(f'Precision@{k}: {em: .2f}')
def evaluate_batch_retrieval(args, rag_model, questions):
    """Retrieve documents for a batch of questions and return one
    tab-separated string of (de-quoted) document titles per question."""

    def strip_title(title):
        # titles arrive wrapped in literal double quotes
        if title.startswith("""\""""):
            title = title[1:]
        if title.endswith("""\""""):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="""pt""", padding=True, truncation=True, )["""input_ids"""].to(args.device)
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="""pt""", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["""title"""]]
        provenance_strings.append("""\t""".join(provenance))
    return provenance_strings
def evaluate_batch_eae(args, rag_model, questions):
    """Generate answers for a batch of questions with the RAG model.

    Name restored from the dispatch later in this file
    (``evaluate_batch_eae if args.eval_mode == "e2e" ...``).
    NOTE(review): ``early_stopping=False`` restored for the obfuscated
    placeholder — confirm against the un-obfuscated upstream.
    """
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="""pt""", padding=True, truncation=True)
        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)
        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("""Q: {} - A: {}""".format(q, a))
        return answers
def get_args():
    """Build and parse the CLI arguments for RAG evaluation; also resolves
    ``args.device``.

    NOTE(review): obfuscated placeholders for ``type=``/``default=``/``required=``
    were restored (str/int types, ``default=None``, ``required=True``) following
    the help texts — confirm against the un-obfuscated upstream script.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--model_type""", choices=["""rag_sequence""", """rag_token""", """bart"""], type=str, help=(
            """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"""
            """ model_name_or_path"""
        ), )
    parser.add_argument(
        """--index_name""", default=None, choices=["""exact""", """compressed""", """legacy"""], type=str, help="""RAG model retriever type""", )
    parser.add_argument(
        """--index_path""", default=None, type=str, help="""Path to the retrieval index""", )
    parser.add_argument("""--n_docs""", default=5, type=int, help="""Number of retrieved docs""")
    parser.add_argument(
        """--model_name_or_path""", default=None, type=str, required=True, help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""", )
    parser.add_argument(
        """--eval_mode""", choices=["""e2e""", """retrieval"""], default="""e2e""", type=str, help=(
            """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"""
            """ precision@k."""
        ), )
    parser.add_argument("""--k""", default=1, type=int, help="""k for the precision@k calculation""")
    parser.add_argument(
        """--evaluation_set""", default=None, type=str, required=True, help="""Path to a file containing evaluation samples""", )
    parser.add_argument(
        """--gold_data_path""", default=None, type=str, required=True, help="""Path to a tab-separated file with gold samples""", )
    parser.add_argument(
        """--gold_data_mode""", default="""qa""", type=str, choices=["""qa""", """ans"""], help=(
            """Format of the gold data file"""
            """qa - a single line in the following format: question [tab] answer_list"""
            """ans - a single line of the gold file contains the expected answer string"""
        ), )
    parser.add_argument(
        """--predictions_path""", type=str, default="""predictions.txt""", help="""Name of the predictions file, to be stored in the checkpoints directory""", )
    parser.add_argument(
        """--eval_all_checkpoints""", action="""store_true""", help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""", )
    parser.add_argument(
        """--eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""", )
    parser.add_argument(
        """--recalculate""", help="""Recalculate predictions even if the prediction file exists""", action="""store_true""", )
    parser.add_argument(
        """--num_beams""", default=4, type=int, help="""Number of beams to be used when generating answers""", )
    parser.add_argument("""--min_length""", default=1, type=int, help="""Min length of the generated answers""")
    parser.add_argument("""--max_length""", default=50, type=int, help="""Max length of the generated answers""")
    parser.add_argument(
        """--print_predictions""", action="""store_true""", help="""If True, prints predictions while evaluating.""", )
    parser.add_argument(
        """--print_docs""", action="""store_true""", help="""If True, prints docs retried while generating.""", )
    args = parser.parse_args()
    args.device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""")
    return args
def main(args):
    """Evaluate one or more RAG/BART checkpoints on an evaluation set.

    For each checkpoint: build (or reuse) a predictions file by generating
    answers in batches, then score it against the gold data.

    Args:
        args: parsed CLI namespace (see the argument parser above); must carry
            model_type, model_name_or_path, eval_mode, predictions_path,
            gold_data_path, evaluation_set, eval_batch_size, device, etc.
    """
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    # Either every sub-directory of the model path (one per checkpoint), or
    # just the model path itself.
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    # NOTE(review): `evaluate_batch_eae` is the name this module defines for the
    # end-to-end batch evaluator — confirm it matches the definition above.
    evaluate_batch_fn = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            # Reuse an existing predictions file instead of regenerating it.
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info(" Batch size = %d", args.eval_batch_size)
        logger.info(" Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            # Flush the final, possibly smaller, batch.
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    # Parse CLI arguments once and hand them to the evaluation driver.
    args = get_args()
    main(args)
'''simple docstring'''
from __future__ import annotations
def _snake_case ( _SCREAMING_SNAKE_CASE : int | str ) -> bool:
"""simple docstring"""
lowerCAmelCase = str(_SCREAMING_SNAKE_CASE )
return n == n[::-1]
def _snake_case ( _SCREAMING_SNAKE_CASE : int = 1_000_000 ) -> Dict:
"""simple docstring"""
lowerCAmelCase = 0
for i in range(1 , _SCREAMING_SNAKE_CASE ):
if is_palindrome(_SCREAMING_SNAKE_CASE ) and is_palindrome(bin(_SCREAMING_SNAKE_CASE ).split("""b""" )[1] ):
total += i
return total
if __name__ == "__main__":
    # input() already returns a str; the redundant str() wrapper was removed.
    print(solution(int(input().strip())))
'''simple docstring'''
import argparse
import os
import re
import packaging.version
# Directory scanned by update_version_in_examples().
PATH_TO_EXAMPLES = "examples/"
# Per-target (compiled regex, replacement-template) pairs; "VERSION" in the
# template is substituted with the release version at update time.
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# Files whose version string is rewritten by global_version_update().
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Rewrite the version string inside `fname`.

    Args:
        fname: path of the file to edit in place.
        version: new version string to substitute.
        pattern: key into REPLACE_PATTERNS selecting the regex/template pair.
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the pinned minimum version in every example script.

    Walks the examples directory, skipping non-maintained sub-trees, and
    rewrites the `check_min_version(...)` call in each .py file.
    """
    for folder, directories, fnames in os.walk("examples/"):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Propagate `version` to every versioned file.

    Args:
        version: new version string.
        patch: if True this is a patch release, so example scripts (which pin
            a minimum minor version) are left untouched.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Point the README model-list doc links at the stable docs.

    Replaces ".../transformers/main/model_doc" with ".../transformers/model_doc"
    in the numbered model list of README.md (the section delimited by the two
    prompts below).
    """
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open("README.md", "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open("README.md", "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current package version from the main __init__.py.

    Returns:
        A packaging.version.Version parsed from the __version__ assignment.
    """
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    # Group 0 of the "init" pattern captures the quoted version string.
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Interactively bump the version for a release.

    Suggests the next version (strip .dev0, bump micro for a patch, otherwise
    bump minor), asks the operator to confirm or override, then updates all
    versioned files and — for non-patch releases — cleans the README.
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        default_version = f'{default_version.major}.{default_version.minor + 1}.0'
    # Now let's ask nicely if that's the right one.
    version = input(f'Which version are you releasing? [{default_version}]')
    if len(version) == 0:
        version = default_version
    print(f'Updating version to {version}.')
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Interactively move the repository to the next dev version after a release."""
    current_version = get_version()
    dev_version = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f'Which version are we developing now? [{dev_version}]')
    if len(version) == 0:
        version = dev_version
    print(f'Updating version to {version}.')
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
    parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('Nothing to do after a patch :-)')
    else:
        post_release_work()
| 254 |
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
_UpperCamelCase = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_UpperCamelCase = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_UpperCamelCase = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _A(datasets.Metric):
    """F1 metric backed by scikit-learn's f1_score."""

    def _info(self):
        # `datasets.Metric` dispatches to `_info`/`_compute` by name.  The
        # "multilabel" config takes sequences of ints per example; every other
        # config takes a single int per example.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        """Return {"f1": ...}: a float for a single score, else the per-label array."""
        # sklearn's signature is f1_score(y_true, y_pred, ...), so references
        # come first.  NOTE(review): `fa_score` is bound by this module's
        # `from sklearn.metrics import fa_score` line, which should alias
        # sklearn's `f1_score` — confirm/fix that import upstream.
        score = fa_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 254 | 1 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
lowerCAmelCase__ : List[str] ='''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
lowerCAmelCase__ : Any ='''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
lowerCAmelCase__ : List[str] ='''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_(datasets.Metric):
    """chrF / chrF++ metric backed by sacrebleu's CHRF implementation."""

    def _info(self):
        # `datasets.Metric` dispatches to `_info`/`_compute` by name.
        # sacrebleu < 1.4.12 does not expose the CHRF API we rely on.
        if version.parse(scb.__version__) < version.parse('1.4.12'):
            raise ImportWarning(
                'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
                'You can install it with `pip install "sacrebleu>=1.4.12"`.')
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Sequence(datasets.Value('string', id='sequence'), id='references'),
                }),
            codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'],
            reference_urls=[
                'https://github.com/m-popovic/chrF',
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order=CHRF.CHAR_ORDER,
        word_order=CHRF.WORD_ORDER,
        beta=CHRF.BETA,
        lowercase=False,
        whitespace=False,
        eps_smoothing=False,
    ):
        """Score `predictions` against `references` with chrF (chrF++ if word_order=2)."""
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('Sacrebleu requires the same number of references for each prediction')
        # Transpose: sacrebleu wants one list per reference position, not one
        # list per prediction.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 370 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __lowercase(a__) -> int:  # picklable for multiprocessing
    """Return a__ + 1; kept at module level so multiprocessing can pickle it."""
    return a__ + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __lowercase ( ) -> Any:
    # Entering the joblibspark backend registers it under the name "spark".
    with parallel_backend('spark' ):
        assert ParallelBackendConfig.backend_name == "spark"
    # NOTE(review): every `a__` below is unresolved — this function takes no
    # parameters.  Presumably the first `a__` in each `pytest.raises` was an
    # exception type (ValueError?) and the `map_nested` arguments were a
    # picklable function plus the list assigned just above; confirm against
    # history before running.
    __SCREAMING_SNAKE_CASE = [1, 2, 3]
    with pytest.raises(a__ ):
        with parallel_backend('unsupported backend' ):
            map_nested(a__ , a__ , num_proc=2 )
    with pytest.raises(a__ ):
        with parallel_backend('unsupported backend' ):
            map_nested(a__ , a__ , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def __lowercase ( a__ ) -> Union[str, Any]:
    # The first five literals are inputs of increasing nesting depth; the last
    # five are the corresponding expected outputs after mapping +1 over every
    # leaf.  All ten were assigned to the same placeholder name, so only the
    # final assignment survives at runtime.
    __SCREAMING_SNAKE_CASE = [1, 2]
    __SCREAMING_SNAKE_CASE = {'a': 1, 'b': 2}
    __SCREAMING_SNAKE_CASE = {'a': [1, 2], 'b': [3, 4]}
    __SCREAMING_SNAKE_CASE = {'a': {'1': 1}, 'b': 2}
    __SCREAMING_SNAKE_CASE = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
    __SCREAMING_SNAKE_CASE = [2, 3]
    __SCREAMING_SNAKE_CASE = {'a': 2, 'b': 3}
    __SCREAMING_SNAKE_CASE = {'a': [2, 3], 'b': [4, 5]}
    __SCREAMING_SNAKE_CASE = {'a': {'1': 2}, 'b': 3}
    __SCREAMING_SNAKE_CASE = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
    # NOTE(review): `a__` here is only the parametrized num_proc, yet it is
    # also used as the mapped function and the data; the `expected_map_nested_sa`
    # names are never assigned.  The original asserts compared each input's
    # map_nested result with its expected counterpart — reconstruct from
    # history before relying on this test.
    with parallel_backend('spark' ):
        assert map_nested(a__ , a__ , num_proc=a__ ) == expected_map_nested_sa
        assert map_nested(a__ , a__ , num_proc=a__ ) == expected_map_nested_sa
        assert map_nested(a__ , a__ , num_proc=a__ ) == expected_map_nested_sa
        assert map_nested(a__ , a__ , num_proc=a__ ) == expected_map_nested_sa
        assert map_nested(a__ , a__ , num_proc=a__ ) == expected_map_nested_sa
assert map_nested(a__ , a__ , num_proc=a__ ) == expected_map_nested_sa
| 118 | 0 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    """Builds tiny configs and random inputs for DecisionTransformerModel tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return (config, states, actions, rewards, returns_to_go, timesteps, attention_mask)."""
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        """A small DecisionTransformerConfig matching the tester's dimensions."""
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask
    ):
        """Instantiate the model, run a forward pass, and verify output shapes."""
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the format the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for DecisionTransformerModel."""

    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        """The model's forward must start with the six expected argument names."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the pretrained hopper-expert checkpoint."""

    @slow
    def test_autoregressive_prediction(self):
        """Run two steps of autoregressive action prediction and compare to reference values."""
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            # Append a zero placeholder for the action/reward of the new step.
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                _, action_pred, _ = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            # Stand-in for env.step(action): a random next state and a fixed reward.
            state, reward, done, _ = (
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
| 188 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3):
    """Build and simulate a quantum Fourier transform circuit.

    Args:
        number_of_qubits: size of the register; must be an exact integer in (0, 10].

    Returns:
        The measurement counts from a 10000-shot qasm_simulator run.

    Raises:
        TypeError: if a string is passed instead of a number.
        ValueError: if the qubit count is non-positive, non-integral, or > 10.
    """
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(counter):
        # Hadamard on the highest remaining qubit, then controlled phase
        # rotations from every lower qubit.
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # Reverse the qubit order with swaps to finish the QFT.
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
    # Demo: run the QFT on a 3-qubit register and print the measured counts.
    print(
        f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
    )
| 188 | 1 |
from __future__ import annotations
import requests
# Reddit post fields that get_subreddit_data() accepts in `wanted_data`.
valid_terms = set(
    'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def snake_case(subreddit, limit=1, age="new", wanted_data=None) -> dict:
    """Fetch posts from a subreddit via Reddit's public JSON endpoint.

    The original signature declared every parameter as ``__lowercase``
    (duplicate argument names — a SyntaxError); the intended names are taken
    from the body's own references.

    Args:
        subreddit: subreddit name, e.g. ``"learnpython"``.
        limit: number of posts to return.
        age: listing to query — ``"new"``, ``"hot"``, ``"top"``, ...
        wanted_data: post attributes to extract; falsy returns the raw
            child objects instead.

    Returns:
        dict mapping post index -> post data.

    Raises:
        ValueError: if ``wanted_data`` contains an attribute outside the
            module-level whitelist.
        requests.HTTPError: on HTTP 429 (rate limited).
    """
    wanted_data = wanted_data or []
    # Validate requested fields against the module-level whitelist (bound to
    # the mangled name `__SCREAMING_SNAKE_CASE` above).
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - __SCREAMING_SNAKE_CASE)):
        msg = f"""Invalid search term: {invalid_search_terms}"""
        raise ValueError(msg)
    response = requests.get(
        f"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        # No projection requested: return the raw child objects by index.
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited.Try after some time
    # NOTE(review): `get_subreddit_data` is not defined under that name in this
    # file (the fetcher above is called `snake_case`) — confirm the intended
    # entry point.  The trailing "| 284 | from __future__ ..." on the next
    # line is table residue fused in by the extraction that produced this dump.
    print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext'])) | 284 | from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def snake_case(config, input_ids, attention_mask=None, head_mask=None) -> dict:
    """Build the input dict for a TF-OPT forward pass.

    If no attention mask is supplied, derive one by marking every token that
    differs from ``config.pad_token_id`` with 1.

    Fixes: the original declared all four parameters as ``__lowercase``
    (duplicate names — a SyntaxError) and cast to the nonexistent dtype
    ``tf.inta``; upstream transformers' ``prepare_opt_inputs_dict`` uses
    ``tf.int8`` here.

    Args:
        config: model config providing ``pad_token_id``.
        input_ids: token id tensor.
        attention_mask: optional precomputed mask; computed when ``None``.
        head_mask: accepted for signature compatibility; unused here.
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class lowercase_ :
    """Tiny-config TF-OPT model tester driving the test suite below."""

    # NOTE(review): three consecutive assignments share the name
    # `_lowerCamelCase`, so only the last ('gelu') survives at runtime; the
    # first two look like mangled names for the config class and a
    # config-update dict (the methods below read `self.config_cls` and
    # `self.config_updates`) — confirm before relying on them.
    _lowerCamelCase = OPTConfig
    _lowerCamelCase = {}
    _lowerCamelCase = 'gelu'

    # NOTE(review): every parameter below is spelled `lowercase_` (duplicate
    # argument names are a SyntaxError); the original identifiers were lost
    # in whatever rewrite produced this file.  The assignment targets are
    # likewise all `_snake_case`; the right-hand sides preserve the intended
    # attribute names.
    def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=99 , lowercase_=16 , lowercase_=2 , lowercase_=4 , lowercase_=4 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=20 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=16 , lowercase_=16 , ):
        _snake_case : Dict = parent
        _snake_case : List[str] = batch_size
        _snake_case : Optional[Any] = seq_length
        _snake_case : Dict = is_training
        _snake_case : List[Any] = use_labels
        _snake_case : Dict = vocab_size
        _snake_case : Tuple = hidden_size
        _snake_case : Optional[int] = num_hidden_layers
        _snake_case : List[str] = num_attention_heads
        _snake_case : Tuple = intermediate_size
        _snake_case : Dict = hidden_act
        _snake_case : Any = hidden_dropout_prob
        _snake_case : Optional[int] = attention_probs_dropout_prob
        _snake_case : Tuple = max_position_embeddings
        _snake_case : List[Any] = eos_token_id
        _snake_case : Optional[int] = pad_token_id
        _snake_case : Dict = bos_token_id
        _snake_case : List[Any] = embed_dim
        _snake_case : Optional[int] = word_embed_proj_dim
        _snake_case : Union[str, Any] = False

    def UpperCamelCase ( self ):
        """Builds a small OPT config plus (input_ids, attention_mask) inputs,
        appending an EOS column to randomly drawn token ids."""
        _snake_case : int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        _snake_case : str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        _snake_case : int = tf.concat([input_ids, eos_tensor] , axis=1 )
        _snake_case : Optional[Any] = self.config_cls(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase_ , **self.config_updates , )
        _snake_case : Any = prepare_opt_inputs_dict(lowercase_ , lowercase_ )
        return config, inputs_dict

    def UpperCamelCase ( self , lowercase_ , lowercase_ ):
        """Checks that decoding with cached past_key_values matches a full
        forward pass on the concatenated sequence (random slice compared)."""
        _snake_case : Any = TFOPTModel(config=lowercase_ )
        _snake_case : int = inputs_dict["input_ids"]
        _snake_case : Optional[int] = input_ids[:1, :]
        _snake_case : Any = inputs_dict["attention_mask"][:1, :]
        _snake_case : List[str] = 1
        # first forward pass
        _snake_case : Union[str, Any] = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
        _snake_case ,_snake_case : str = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        _snake_case : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _snake_case : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        _snake_case : int = tf.concat([input_ids, next_tokens] , axis=-1 )
        _snake_case : int = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        _snake_case : Optional[Any] = model(lowercase_ , attention_mask=lowercase_ )[0]
        _snake_case : Optional[int] = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        _snake_case : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        _snake_case : List[str] = output_from_no_past[:, -3:, random_slice_idx]
        _snake_case : List[Any] = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 )
@require_tf
class lowercase_ ( __snake_case , __snake_case , unittest.TestCase ):
    """Common-test harness for TFOPTModel / TFOPTForCausalLM."""

    # NOTE(review): both base classes are spelled `__snake_case` — the
    # original mixin names (model-tester / pipeline-tester mixins per the
    # module imports) were mangled.  Likewise, the repeated `_lowerCamelCase`
    # class attributes below shadow each other; only the last (10) survives.
    _lowerCamelCase = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    _lowerCamelCase = (TFOPTForCausalLM,) if is_tf_available() else ()
    _lowerCamelCase = (
        {'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
    )
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = 10

    def UpperCamelCase ( self ):
        """Set up the model tester and a config tester for OPTConfig."""
        _snake_case : Dict = TFOPTModelTester(self )
        _snake_case : Optional[int] = ConfigTester(self , config_class=lowercase_ )

    def UpperCamelCase ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def UpperCamelCase ( self ):
        """Exercise cached decoding on large inputs via the model tester."""
        _snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )

    def UpperCamelCase ( self ):
        """Resize token embeddings up and down and check shapes and that the
        overlapping weights are untouched."""
        _snake_case ,_snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(lowercase_ , lowercase_ ):
            # Returns the embedding weight, building the model first if the
            # weight does not exist yet.
            if hasattr(lowercase_ , "weight" ):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(lowercase_ , "weight" ):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                _snake_case : Dict = model_class(config=lowercase_ )
                _snake_case : Union[str, Any] = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
                _snake_case : Optional[Any] = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
                # reshape the embeddings
                model.resize_token_embeddings(lowercase_ )
                _snake_case : int = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
                _snake_case : Tuple = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
                # check that the resized embeddings size matches the desired size.
                _snake_case : Any = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0] , lowercase_ )
                # check that weights remain the same after resizing
                _snake_case : Dict = True
                for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
                    if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
                        _snake_case : Optional[int] = False
                self.assertTrue(lowercase_ )
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0] , lowercase_ )
                    _snake_case : Optional[int] = True
                    for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
                        if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
                            _snake_case : str = False
                    self.assertTrue(lowercase_ )
def snake_case(__lowercase) -> Dict:
    """Wrap a nested list of token ids in a ``tf.constant``.

    Fix: the original used ``tf.intaa``, which is not a TensorFlow dtype;
    upstream transformers' ``_long_tensor`` helper uses ``tf.int32``.
    """
    return tf.constant(__lowercase, dtype=tf.int32)
@require_tf
class lowercase_ ( unittest.TestCase ):
    """Builds a minimal OPT config plus dummy batched inputs."""

    # NOTE(review): the attribute name is mangled; the method below reads
    # `self.vocab_size`, so 99 is presumably the vocab size — confirm.
    _lowerCamelCase = 99

    def UpperCamelCase ( self ):
        """Returns (config, input_ids, batch_size): random ids from
        [3, vocab) with an EOS column (token id 2) appended."""
        _snake_case : Any = tf.ones((4, 1) , dtype=tf.intaa ) * 2
        _snake_case : Optional[int] = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
        _snake_case : List[Any] = input_ids.shape[0]
        _snake_case : List[str] = OPTConfig(
            vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class lowercase_ ( unittest.TestCase ):
    """Integration check: facebook/opt-350m hidden states match reference
    values, eagerly and under XLA compilation."""

    @slow
    def UpperCamelCase ( self ):
        _snake_case : Optional[Any] = TFOPTModel.from_pretrained("facebook/opt-350m" )
        _snake_case : Optional[Any] = _long_tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
        _snake_case : Optional[int] = tf.not_equal(lowercase_ , model.config.pad_token_id )
        with tf.GradientTape():
            _snake_case : List[Any] = model(input_ids=lowercase_ , attention_mask=lowercase_ ).last_hidden_state
        _snake_case : List[str] = (1, 11, 512)
        self.assertEqual(output.shape , lowercase_ )
        # Reference slice of the hidden states (first 3 positions/dims).
        _snake_case : Optional[int] = tf.constant(
            [[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
        self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-3 ) )
        # Same forward pass, jit-compiled; looser tolerance for XLA.
        _snake_case : List[Any] = tf.function(lowercase_ , jit_compile=lowercase_ )
        _snake_case : Tuple = xla_generate(lowercase_ , lowercase_ )[0]
        self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-2 ) )
@require_tf
@slow
class lowercase_ ( unittest.TestCase ):
    """Integration check: mean logits of facebook/opt-350m match Metaseq
    reference values, eagerly and under XLA."""

    def UpperCamelCase ( self ):
        super().setUp()
        _snake_case : Optional[int] = "facebook/opt-350m"

    def UpperCamelCase ( self ):
        _snake_case : Union[str, Any] = TFOPTForCausalLM.from_pretrained(self.path_model )
        _snake_case : Optional[Any] = GPTaTokenizer.from_pretrained(self.path_model )
        _snake_case : List[str] = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        _snake_case : List[Any] = tokenizer(lowercase_ , return_tensors="tf" , padding=lowercase_ , add_special_tokens=lowercase_ )
        _snake_case : List[str] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        # Reference per-position mean logits for the four prompts.
        _snake_case : Tuple = tf.constant(
            [
                [1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
                [-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
                [0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
                [6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
            ] )
        self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) )
        # Same check with the forward pass jit-compiled.
        _snake_case : List[Any] = tf.function(lowercase_ , jit_compile=lowercase_ )
        _snake_case : Tuple = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) )
@require_tf
@slow
class lowercase_ ( unittest.TestCase ):
    """Generation integration tests for OPT checkpoints (greedy generation,
    left-padded batched generation, post-attention-layernorm variant)."""

    @property
    def UpperCamelCase ( self ):
        # Shared generation prompts.
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def UpperCamelCase ( self ):
        """Greedy generations of facebook/opt-125m match expected strings."""
        _snake_case : List[Any] = "facebook/opt-125m"
        _snake_case : int = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        _snake_case : str = []
        _snake_case : Any = GPTaTokenizer.from_pretrained(lowercase_ )
        _snake_case : List[Any] = TFOPTForCausalLM.from_pretrained(lowercase_ )
        for prompt in self.prompts:
            _snake_case : str = tokenizer(lowercase_ , return_tensors="tf" ).input_ids
            _snake_case : Any = model.generate(lowercase_ , max_length=10 )
            _snake_case : List[str] = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
            predicted_outputs += generated_string
        self.assertListEqual(lowercase_ , lowercase_ )

    def UpperCamelCase ( self ):
        """Left-padded batched generation equals per-sentence generation."""
        _snake_case : Optional[int] = "facebook/opt-350m"
        _snake_case : Dict = GPTaTokenizer.from_pretrained(lowercase_ )
        _snake_case : Dict = TFOPTForCausalLM.from_pretrained(lowercase_ )
        _snake_case : int = "left"
        # use different length sentences to test batching
        _snake_case : Union[str, Any] = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        _snake_case : Optional[Any] = tokenizer(lowercase_ , return_tensors="tf" , padding=lowercase_ )
        _snake_case : List[Any] = inputs["input_ids"]
        _snake_case : Union[str, Any] = model.generate(input_ids=lowercase_ , attention_mask=inputs["attention_mask"] )
        _snake_case : int = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
        _snake_case : List[str] = model.generate(input_ids=lowercase_ )
        # Number of pad tokens in the shorter sentence of the batch.
        _snake_case : Dict = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1] , tf.intaa ) )
        _snake_case : int = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
        _snake_case : int = model.generate(input_ids=lowercase_ , max_length=model.config.max_length - num_paddings )
        _snake_case : Tuple = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
        _snake_case : Dict = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase_ )
        _snake_case : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase_ )
        _snake_case : Optional[Any] = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(lowercase_ , lowercase_ )
        self.assertListEqual(lowercase_ , [non_padded_sentence, padded_sentence] )

    def UpperCamelCase ( self ):
        """Greedy generations of facebook/opt-350m match expected strings."""
        _snake_case : Tuple = "facebook/opt-350m"
        _snake_case : Optional[int] = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        _snake_case : str = []
        _snake_case : str = GPTaTokenizer.from_pretrained(lowercase_ )
        _snake_case : List[str] = TFOPTForCausalLM.from_pretrained(lowercase_ )
        for prompt in self.prompts:
            _snake_case : Dict = tokenizer(lowercase_ , return_tensors="tf" ).input_ids
            _snake_case : Any = model.generate(lowercase_ , max_length=10 )
            _snake_case : Tuple = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
            predicted_outputs += generated_string
        # NOTE(review): the trailing "| 284 | 1 |" below is table residue
        # fused into the line by the extraction that produced this dump.
        self.assertListEqual(lowercase_ , lowercase_ ) | 284 | 1 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __lowercase ( unittest.TestCase ):
    """Tests for TextStreamer / TextIteratorStreamer against tiny GPT-2
    models: stdout streaming, iterator streaming, prompt skipping, special
    token skipping, and the iterator timeout."""

    def UpperCAmelCase__ (self ):
        """TextStreamer prints exactly the greedy decode to stdout."""
        lowerCamelCase_ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        lowerCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_snake_case )
        lowerCamelCase_ : Optional[int] = -1
        lowerCamelCase_ : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_snake_case )
        lowerCamelCase_ : str = model.generate(_snake_case , max_new_tokens=1_0 , do_sample=_snake_case )
        lowerCamelCase_ : Any = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            lowerCamelCase_ : Union[str, Any] = TextStreamer(_snake_case )
            model.generate(_snake_case , max_new_tokens=1_0 , do_sample=_snake_case , streamer=_snake_case )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        lowerCamelCase_ : Union[str, Any] = cs.out[:-1]
        self.assertEqual(_snake_case , _snake_case )

    def UpperCAmelCase__ (self ):
        """Consuming a TextIteratorStreamer yields the greedy decode."""
        lowerCamelCase_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        lowerCamelCase_ : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_snake_case )
        lowerCamelCase_ : Union[str, Any] = -1
        lowerCamelCase_ : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_snake_case )
        lowerCamelCase_ : Dict = model.generate(_snake_case , max_new_tokens=1_0 , do_sample=_snake_case )
        lowerCamelCase_ : Union[str, Any] = tokenizer.decode(greedy_ids[0] )
        lowerCamelCase_ : List[Any] = TextIteratorStreamer(_snake_case )
        lowerCamelCase_ : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
        lowerCamelCase_ : Optional[Any] = Thread(target=model.generate , kwargs=_snake_case )
        thread.start()
        lowerCamelCase_ : Optional[Any] = ''''''
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(_snake_case , _snake_case )

    def UpperCAmelCase__ (self ):
        """skip_prompt=True streams only the newly generated suffix."""
        lowerCamelCase_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        lowerCamelCase_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_snake_case )
        lowerCamelCase_ : Optional[Any] = -1
        lowerCamelCase_ : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_snake_case )
        lowerCamelCase_ : List[str] = model.generate(_snake_case , max_new_tokens=1_0 , do_sample=_snake_case )
        lowerCamelCase_ : Dict = greedy_ids[:, input_ids.shape[1] :]
        lowerCamelCase_ : int = tokenizer.decode(new_greedy_ids[0] )
        with CaptureStdout() as cs:
            lowerCamelCase_ : int = TextStreamer(_snake_case , skip_prompt=_snake_case )
            model.generate(_snake_case , max_new_tokens=1_0 , do_sample=_snake_case , streamer=_snake_case )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        lowerCamelCase_ : List[Any] = cs.out[:-1]
        self.assertEqual(_snake_case , _snake_case )

    def UpperCAmelCase__ (self ):
        """skip_special_tokens=True drops the BOS prompt token from stdout."""
        lowerCamelCase_ : Any = AutoTokenizer.from_pretrained('''distilgpt2''' )
        lowerCamelCase_ : Dict = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_snake_case )
        lowerCamelCase_ : Any = -1
        lowerCamelCase_ : Optional[Any] = torch.ones((1, 5) , device=_snake_case ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            lowerCamelCase_ : Dict = TextStreamer(_snake_case , skip_special_tokens=_snake_case )
            model.generate(_snake_case , max_new_tokens=1 , do_sample=_snake_case , streamer=_snake_case )
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        lowerCamelCase_ : Union[str, Any] = cs.out[:-1]  # Remove the final "\n"
        lowerCamelCase_ : str = tokenizer(_snake_case , return_tensors='''pt''' )
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )

    def UpperCAmelCase__ (self ):
        """A tiny timeout makes iterating the streamer raise (Empty)."""
        lowerCamelCase_ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        lowerCamelCase_ : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_snake_case )
        lowerCamelCase_ : Union[str, Any] = -1
        lowerCamelCase_ : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_snake_case )
        lowerCamelCase_ : Tuple = TextIteratorStreamer(_snake_case , timeout=0.0_01 )
        lowerCamelCase_ : Any = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
        lowerCamelCase_ : int = Thread(target=model.generate , kwargs=_snake_case )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(_snake_case ):
            lowerCamelCase_ : Optional[Any] = ''''''
            for new_text in streamer:
                streamer_text += new_text
| 318 |
# Pinned dependency specifiers keyed by package name (diffusers-style
# setup "deps" table, normally consumed by setup.py / dependency checks).
snake_case_ : Dict = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
| 51 | 0 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def snake_case__(SCREAMING_SNAKE_CASE_: Union[str, Any]) -> bool:
    """Return True if the object is a module produced by ``torch.compile``.

    Dynamo (and hence ``OptimizedModule``) only exists from PyTorch 2.0, and
    the attribute probe must be made on the ``torch`` package itself.  The
    original probed the candidate object (``hasattr(model, '_dynamo')``),
    which is False for ordinary models, making the function always return
    False on torch >= 2.0.
    """
    if is_torch_version('<', '2.0.0') or not hasattr(torch, '_dynamo'):
        return False
    return isinstance(SCREAMING_SNAKE_CASE_, torch._dynamo.eval_frame.OptimizedModule)
def snake_case__ ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : bool = True ):
    """Unwrap a model from DDP/DataParallel/DeepSpeed/torch.compile wrappers
    and (optionally) restore an fp32-wrapped forward method.

    NOTE(review): this body is heavily mangled — both parameters are spelled
    ``SCREAMING_SNAKE_CASE_`` (duplicate names, a SyntaxError) and most
    assignment targets collapse to ``lowercase__`` while later code reads
    names such as ``is_compiled``, ``model``, ``forward`` and
    ``compiled_model``.  The statement order is preserved verbatim; restore
    the original identifiers (``model`` / ``keep_fp32_wrapper``) before use.
    """
    lowercase__ : Dict = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    lowercase__ : List[str] = is_compiled_module(SCREAMING_SNAKE_CASE_ )
    if is_compiled:
        # Remember the compiled wrapper and unwrap to the original module.
        lowercase__ : Optional[Any] = model
        lowercase__ : int = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    # Peel off nested parallel wrappers via their `.module` attribute.
    while isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
        lowercase__ : Union[str, Any] = model.module
    if not keep_fpaa_wrapper:
        lowercase__ : Dict = getattr(SCREAMING_SNAKE_CASE_ , 'forward' )
        lowercase__ : str = model.__dict__.pop('_original_forward' , SCREAMING_SNAKE_CASE_ )
        if original_forward is not None:
            # Walk the decorator chain until the saved original forward is hit.
            while hasattr(SCREAMING_SNAKE_CASE_ , '__wrapped__' ):
                lowercase__ : Any = forward.__wrapped__
                if forward == original_forward:
                    break
            lowercase__ : int = forward
        if getattr(SCREAMING_SNAKE_CASE_ , '_converted_to_transformer_engine' , SCREAMING_SNAKE_CASE_ ):
            convert_model(SCREAMING_SNAKE_CASE_ , to_transformer_engine=SCREAMING_SNAKE_CASE_ )
    if is_compiled:
        # Re-attach the unwrapped module under the compiled wrapper.
        lowercase__ : Optional[int] = model
        lowercase__ : Optional[Any] = compiled_model
    return model
def snake_case__ ( ):
    """Synchronization barrier: block until every process reaches this point
    (delegates to accelerate's PartialState)."""
    PartialState().wait_for_everyone()
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] ):
    """Save an object once per node: via ``xm.save`` on TPU, otherwise with
    ``torch.save`` on the local main process only.

    NOTE(review): both parameters share the mangled name
    ``SCREAMING_SNAKE_CASE_`` (a SyntaxError); the intended signature is
    presumably ``(obj, path)`` — confirm against upstream accelerate.
    """
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    elif PartialState().local_process_index == 0:
        torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@contextmanager
def snake_case__(**kwargs):
    """Temporarily set environment variables (keys upper-cased) inside a
    ``with`` block.

    Fixes over the original: the loop iterated an undefined name ``kwargs``
    and assigned each value to a dead local, so no environment variable was
    ever actually set; and tear-down deleted keys unconditionally instead of
    restoring any value that existed beforehand.

    Args:
        **kwargs: name/value pairs; names are upper-cased, values coerced
            with ``str``.

    Yields:
        None — the mutated environment is active inside the block.
    """
    previous = {}
    try:
        for key, value in kwargs.items():
            name = key.upper()
            if name in os.environ:
                # Remember the pre-existing value so it can be restored.
                previous[name] = os.environ[name]
            os.environ[name] = str(value)
        yield
    finally:
        for key in kwargs:
            name = key.upper()
            if name in previous:
                os.environ[name] = previous[name]
            elif name in os.environ:
                del os.environ[name]
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Tuple ):
'''simple docstring'''
if not hasattr(SCREAMING_SNAKE_CASE_ , '__qualname__' ) and not hasattr(SCREAMING_SNAKE_CASE_ , '__name__' ):
lowercase__ : Optional[int] = getattr(SCREAMING_SNAKE_CASE_ , '__class__' , SCREAMING_SNAKE_CASE_ )
if hasattr(SCREAMING_SNAKE_CASE_ , '__qualname__' ):
return obj.__qualname__
if hasattr(SCREAMING_SNAKE_CASE_ , '__name__' ):
return obj.__name__
return str(SCREAMING_SNAKE_CASE_ )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
'''simple docstring'''
for key, value in source.items():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ : List[str] = destination.setdefault(SCREAMING_SNAKE_CASE_ , {} )
merge_dicts(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
lowercase__ : Tuple = value
return destination
def snake_case__(port: int = None):
    """Return True if a TCP connection to ``localhost:port`` succeeds, i.e.
    something is already listening on that port.

    Defaults to 29500, the rendezvous port used by ``torch.distributed``.
    Fix: the original read an undefined name ``port`` and assigned the
    default to a dead local, so the default path raised NameError.
    """
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        # connect_ex returns 0 on success instead of raising.
        return s.connect_ex(('localhost', port)) == 0
| 367 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
# Map of checkpoint name -> hosted config URL for Speech2Text2.
# NOTE(review): this rebinds `snake_case_`, clobbering the logger created on
# the previous line — the two constants were distinct names before the
# rewrite that produced this file.
snake_case_ = {
    '''facebook/s2t-wav2vec2-large-en-de''': (
        '''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'''
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class SCREAMING_SNAKE_CASE__ (__snake_case ):
    """Configuration for the Speech2Text2 decoder (model type
    ``speech_to_text_2``).

    NOTE(review): the base class is the mangled name ``__snake_case``
    (presumably ``PretrainedConfig``, which this file imports), the three
    class attributes below all share the name ``__lowerCamelCase``, and the
    ``__init__`` signature repeats the parameter ``a`` (duplicate argument
    names are a SyntaxError) — the original keyword names survive only in
    the attribute assignments inside the body.
    """
    __lowerCamelCase : List[Any] = """speech_to_text_2"""
    __lowerCamelCase : str = ["""past_key_values"""]
    __lowerCamelCase : List[Any] = {"""num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__( self , a=1_0000 , a=6 , a=2048 , a=4 , a=0.0 , a=True , a="relu" , a=256 , a=0.1 , a=0.0 , a=0.0 , a=0.02 , a=2 , a=True , a=1 , a=0 , a=2 , a=1024 , **a , ):
        lowercase__ : Optional[int] = vocab_size
        lowercase__ : List[str] = d_model
        lowercase__ : int = decoder_ffn_dim
        lowercase__ : Optional[Any] = decoder_layers
        lowercase__ : int = decoder_attention_heads
        lowercase__ : Dict = dropout
        lowercase__ : Optional[int] = attention_dropout
        lowercase__ : Optional[int] = activation_dropout
        lowercase__ : Optional[int] = activation_function
        lowercase__ : Dict = init_std
        lowercase__ : List[Any] = decoder_layerdrop
        lowercase__ : int = use_cache
        lowercase__ : Any = decoder_layers
        lowercase__ : Optional[int] = scale_embedding  # scale factor will be sqrt(d_model) if True
        lowercase__ : Tuple = max_target_positions
        super().__init__(
            pad_token_id=a , bos_token_id=a , eos_token_id=a , decoder_start_token_id=a , **a , )
| 216 | 0 |
from __future__ import annotations
# Sieve of Eratosthenes over [0, 1_000_000]: seive[n] is True iff n is prime.
# (The misspelling `seive` is kept — the helpers below index into it.)
# Fixes: the original assigned both the list and the loop counter to the
# mangled name `SCREAMING_SNAKE_CASE_`, leaving `seive`, `i` and the inner
# cross-off all referencing undefined names; 0 and 1 were also left True.
seive = [True] * 1_000_001
seive[0] = seive[1] = False  # 0 and 1 are not prime
i = 2
while i * i <= 1_000_000:
    if seive[i]:
        # Cross off every multiple of the prime i, starting at i*i.
        for j in range(i * i, 1_000_001, i):
            seive[j] = False
    i += 1
def __lowercase(_SCREAMING_SNAKE_CASE) -> bool:
    """Constant-time primality check against the precomputed module-level
    sieve (valid for 0 <= n <= 1_000_000).

    Fix: the body indexed the undefined name ``n`` instead of the parameter.
    """
    return seive[_SCREAMING_SNAKE_CASE]
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
return any(digit in """02468""" for digit in str(_A ) )
def __lowercase ( _SCREAMING_SNAKE_CASE = 1_00_00_00 ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(_A ) and not contains_an_even_digit(_A ):
SCREAMING_SNAKE_CASE = str(_A )
SCREAMING_SNAKE_CASE = [int(str_num[j:] + str_num[:j] ) for j in range(len(_A ) )]
if all(is_prime(_A ) for i in list_nums ):
result.append(_A )
return result
def __lowercase ( ) -> Dict:
    """Return the count of circular primes below one million (the Project
    Euler 35 answer)."""
    # NOTE(review): `find_circular_primes` is not defined under that name in
    # this file — the generator above is also mangled to `__lowercase`, which
    # this very definition shadows.  Confirm the intended callee before
    # running.
    return len(find_circular_primes() )
if __name__ == "__main__":
    # Same undefined-name caveat applies to this CLI entry point.
    print(F'''{len(find_circular_primes()) = }''')
| 296 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 278 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__UpperCAmelCase = pytest.mark.integration
@require_faiss
class a__ ( __lowercase ):
    '''simple docstring'''

    # NOTE(review): the base class is the mangled name `__lowercase`
    # (presumably unittest.TestCase) and every method below is named
    # `__SCREAMING_SNAKE_CASE`, so at class-creation time only the last
    # definition survives — the original per-test names (and the
    # `_create_dummy_dataset` helper that later code calls) were lost in the
    # rewrite that produced this file.
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        # Fixture: a 30-row Dataset with filenames "my_name-train_0..29".
        lowerCAmelCase__ = Dataset.from_dict({'''filename''': ['''my_name-train''' + '''_''' + str(UpperCAmelCase__ ) for x in np.arange(30 ).tolist()]} )
        return dset

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        """Add a faiss index over per-row vectors and query nearest example."""
        import faiss
        lowerCAmelCase__ = self._create_dummy_dataset()
        lowerCAmelCase__ = dset.map(
            lambda lowerCamelCase_ , lowerCamelCase_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=UpperCAmelCase__ , keep_in_memory=UpperCAmelCase__ )
        lowerCAmelCase__ = dset.add_faiss_index('''vecs''' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
        lowerCAmelCase__ , lowerCAmelCase__ = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
        self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
        dset.drop_index('''vecs''' )

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        """Add an index from external arrays and query nearest example."""
        import faiss
        lowerCAmelCase__ = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        lowerCAmelCase__ , lowerCAmelCase__ = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
        self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )

    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        """Round-trip a faiss index through save/load on disk."""
        import faiss
        lowerCAmelCase__ = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , metric_type=faiss.METRIC_INNER_PRODUCT , )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=UpperCAmelCase__ ) as tmp_file:
            dset.save_faiss_index('''vecs''' , tmp_file.name )
            dset.load_faiss_index('''vecs2''' , tmp_file.name )
        os.unlink(tmp_file.name )
        lowerCAmelCase__ , lowerCAmelCase__ = dset.get_nearest_examples('''vecs2''' , np.ones(5 , dtype=np.floataa ) )
        self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )

    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        """Querying a dropped index raises MissingIndex."""
        lowerCAmelCase__ = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' )
        dset.drop_index('''vecs''' )
        self.assertRaises(UpperCAmelCase__ , partial(dset.get_nearest_examples , '''vecs2''' , np.ones(5 , dtype=np.floataa ) ) )

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        """Elasticsearch index path, fully mocked (no live cluster)."""
        from elasticsearch import Elasticsearch
        lowerCAmelCase__ = self._create_dummy_dataset()
        with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
            '''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
            lowerCAmelCase__ = {'''acknowledged''': True}
            mocked_bulk.return_value([(True, None)] * 30 )
            lowerCAmelCase__ = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}}
            lowerCAmelCase__ = Elasticsearch()
            dset.add_elasticsearch_index('''filename''' , es_client=UpperCAmelCase__ )
            lowerCAmelCase__ , lowerCAmelCase__ = dset.get_nearest_examples('''filename''' , '''my_name-train_29''' )
            self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
@require_faiss
class a__ ( __lowercase ):
'''simple docstring'''
# Unit tests for the standalone FaissIndex wrapper (no Dataset involved).
# NOTE(review): locals were mangled by the dump (`index`, `query`, `queries`,
# `scores`, `indices` are the names the assertions still use); `np.floataa`
# looks like mangled np.float32 — TODO confirm.
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
import faiss
lowerCAmelCase__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCAmelCase__ = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase__ = 1
lowerCAmelCase__ , lowerCAmelCase__ = index.search(UpperCAmelCase__ )
# a 2-D single query must be rejected
self.assertRaises(UpperCAmelCase__ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCAmelCase__ = np.eye(5 , dtype=np.floataa )[::-1]
lowerCAmelCase__ , lowerCAmelCase__ = index.search_batch(UpperCAmelCase__ )
# a 1-D query must be rejected by the batched API
self.assertRaises(UpperCAmelCase__ , index.search_batch , queries[0] )
lowerCAmelCase__ = [scores[0] for scores in total_scores]
lowerCAmelCase__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase__ ) , 0 )
# queries were the reversed identity, so best matches come back reversed
self.assertListEqual([4, 3, 2, 1, 0] , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# string_factory selects the concrete faiss index type; combining it with
# custom_index must raise.
import faiss
lowerCAmelCase__ = FaissIndex(string_factory='''Flat''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCAmelCase__ = FaissIndex(string_factory='''LSH''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(UpperCAmelCase__ ):
lowerCAmelCase__ = FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# a pre-built faiss index can be wrapped directly
import faiss
lowerCAmelCase__ = faiss.IndexFlat(5 )
lowerCAmelCase__ = FaissIndex(custom_index=UpperCAmelCase__ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# serialization round-trip: save to a temp file, load, query
import faiss
lowerCAmelCase__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=UpperCAmelCase__ ) as tmp_file:
index.save(tmp_file.name )
lowerCAmelCase__ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCAmelCase__ = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase__ = 1
lowerCAmelCase__ , lowerCAmelCase__ = index.search(UpperCAmelCase__ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _snake_case ( A ) -> Dict:
# Save/load a FaissIndex through an fsspec mock filesystem using
# storage_options. NOTE(review): `mockfs` is presumably a pytest fixture
# whose parameter name was mangled to `A` — verify against the original.
import faiss
lowerCAmelCase__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCAmelCase__ = '''index.faiss'''
lowerCAmelCase__ = F"""mock://{index_name}"""
index.save(A , storage_options=mockfs.storage_options )
lowerCAmelCase__ = FaissIndex.load(A , storage_options=mockfs.storage_options )
lowerCAmelCase__ = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase__ = 1
lowerCAmelCase__ , lowerCAmelCase__ = index.search(A )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class a__ ( __lowercase ):
'''simple docstring'''
# Unit tests for ElasticSearchIndex with a fully mocked ES client.
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
from elasticsearch import Elasticsearch
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
lowerCAmelCase__ = Elasticsearch()
# mocked acknowledgement for index creation
lowerCAmelCase__ = {'''acknowledged''': True}
lowerCAmelCase__ = ElasticSearchIndex(es_client=UpperCAmelCase__ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['''foo''', '''bar''', '''foobar'''] )
# single query
lowerCAmelCase__ = '''foo'''
lowerCAmelCase__ = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
lowerCAmelCase__ , lowerCAmelCase__ = index.search(UpperCAmelCase__ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCAmelCase__ = '''foo'''
lowerCAmelCase__ = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
lowerCAmelCase__ , lowerCAmelCase__ = index.search(UpperCAmelCase__ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCAmelCase__ = ['''foo''', '''bar''', '''foobar''']
lowerCAmelCase__ = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
lowerCAmelCase__ , lowerCAmelCase__ = index.search_batch(UpperCAmelCase__ )
lowerCAmelCase__ = [scores[0] for scores in total_scores]
lowerCAmelCase__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase__ ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCAmelCase__ )
# batched queries with timeout
lowerCAmelCase__ = ['''foo''', '''bar''', '''foobar''']
lowerCAmelCase__ = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
lowerCAmelCase__ , lowerCAmelCase__ = index.search_batch(UpperCAmelCase__ , request_timeout=30 )
lowerCAmelCase__ = [scores[0] for scores in total_scores]
lowerCAmelCase__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase__ ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCAmelCase__ ) | 361 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
# Output container for the 1-D UNet below; holds the denoised sample tensor.
# NOTE(review): `class a__ ( a__ )` is a mangling artifact — the base was
# presumably diffusers' BaseOutput. TODO confirm.
class a__ ( a__ ):
'''simple docstring'''
# sample: the model's output tensor
lowercase__ : torch.FloatTensor
# 1-D UNet (diffusers UNet1DModel-style): time embedding -> down blocks ->
# mid block -> up blocks -> optional out block. Identifier mangling lost the
# local names; comments below mark what each assignment originally bound.
class a__ ( a__ , a__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCamelCase_ = 6_55_36 , lowerCamelCase_ = None , lowerCamelCase_ = 2 , lowerCamelCase_ = 2 , lowerCamelCase_ = 0 , lowerCamelCase_ = "fourier" , lowerCamelCase_ = True , lowerCamelCase_ = False , lowerCamelCase_ = 0.0 , lowerCamelCase_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase_ = "UNetMidBlock1D" , lowerCamelCase_ = None , lowerCamelCase_ = (32, 32, 64) , lowerCamelCase_ = None , lowerCamelCase_ = 8 , lowerCamelCase_ = 1 , lowerCamelCase_ = False , ) -> Optional[int]:
super().__init__()
lowerCAmelCase__ = sample_size
# time
# Either Gaussian-Fourier features or positional (sinusoidal) embeddings.
if time_embedding_type == "fourier":
lowerCAmelCase__ = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase_ , log=lowerCamelCase_ , flip_sin_to_cos=lowerCamelCase_ )
lowerCAmelCase__ = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
lowerCAmelCase__ = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase_ , downscale_freq_shift=lowerCamelCase_ )
lowerCAmelCase__ = block_out_channels[0]
if use_timestep_embedding:
lowerCAmelCase__ = block_out_channels[0] * 4
lowerCAmelCase__ = TimestepEmbedding(
in_channels=lowerCamelCase_ , time_embed_dim=lowerCamelCase_ , act_fn=lowerCamelCase_ , out_dim=block_out_channels[0] , )
# module lists for the encoder/decoder halves
lowerCAmelCase__ = nn.ModuleList([] )
lowerCAmelCase__ = None
lowerCAmelCase__ = nn.ModuleList([] )
lowerCAmelCase__ = None
# down
lowerCAmelCase__ = in_channels
for i, down_block_type in enumerate(lowerCamelCase_ ):
lowerCAmelCase__ = output_channel
lowerCAmelCase__ = block_out_channels[i]
# extra input channels (e.g. the timestep embedding) only feed the first block
if i == 0:
input_channel += extra_in_channels
lowerCAmelCase__ = i == len(lowerCamelCase_ ) - 1
lowerCAmelCase__ = get_down_block(
lowerCamelCase_ , num_layers=lowerCamelCase_ , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCamelCase_ )
# mid
lowerCAmelCase__ = get_mid_block(
lowerCamelCase_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase_ , add_downsample=lowerCamelCase_ , )
# up
lowerCAmelCase__ = list(reversed(lowerCamelCase_ ) )
lowerCAmelCase__ = reversed_block_out_channels[0]
if out_block_type is None:
lowerCAmelCase__ = out_channels
else:
lowerCAmelCase__ = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase_ ):
lowerCAmelCase__ = output_channel
lowerCAmelCase__ = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase_ ) - 1 else final_upsample_channels
)
lowerCAmelCase__ = i == len(lowerCamelCase_ ) - 1
lowerCAmelCase__ = get_up_block(
lowerCamelCase_ , num_layers=lowerCamelCase_ , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCamelCase_ )
lowerCAmelCase__ = output_channel
# out
lowerCAmelCase__ = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
lowerCAmelCase__ = get_out_block(
out_block_type=lowerCamelCase_ , num_groups_out=lowerCamelCase_ , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase_ , act_fn=lowerCamelCase_ , fc_dim=block_out_channels[-1] // 4 , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = True , ) -> Union[UNetaDOutput, Tuple]:
# Forward pass: embed the timestep, run down/mid/up blocks with skip
# connections, then the optional output head.
lowerCAmelCase__ = timestep
# normalize the timestep to a 1-D long tensor on the sample's device
if not torch.is_tensor(lowerCamelCase_ ):
lowerCAmelCase__ = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCamelCase_ ) and len(timesteps.shape ) == 0:
lowerCAmelCase__ = timesteps[None].to(sample.device )
lowerCAmelCase__ = self.time_proj(lowerCamelCase_ )
if self.config.use_timestep_embedding:
lowerCAmelCase__ = self.time_mlp(lowerCamelCase_ )
else:
# without an MLP, broadcast the raw projection across the sequence axis
lowerCAmelCase__ = timestep_embed[..., None]
lowerCAmelCase__ = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
lowerCAmelCase__ = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
lowerCAmelCase__ = ()
for downsample_block in self.down_blocks:
lowerCAmelCase__ , lowerCAmelCase__ = downsample_block(hidden_states=lowerCamelCase_ , temb=lowerCamelCase_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
lowerCAmelCase__ = self.mid_block(lowerCamelCase_ , lowerCamelCase_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
# pop the most recent skip connection for this up block
lowerCAmelCase__ = down_block_res_samples[-1:]
lowerCAmelCase__ = down_block_res_samples[:-1]
lowerCAmelCase__ = upsample_block(lowerCamelCase_ , res_hidden_states_tuple=lowerCamelCase_ , temb=lowerCamelCase_ )
# 5. post-process
if self.out_block:
lowerCAmelCase__ = self.out_block(lowerCamelCase_ , lowerCamelCase_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCamelCase_ ) | 228 | 0 |
'''simple docstring'''
# A quine: the program prints its own source. The lambda substitutes the
# template string into itself via %r (%% escapes the literal %). Any
# reformatting would change the program's output, so it is left as-is.
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
| 237 |
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    """Print every permutation of *sequence*, one per line, via backtracking.

    The dump had both functions renamed to ``UpperCamelCase`` while the call
    sites below still used the original names — restored here.
    """
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Depth-first walk of the permutation state space.

    Extends ``current_sequence`` with one unused element at a time; when it
    reaches full length, prints the completed permutation and backtracks.
    """
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            # backtrack: undo the choice before trying the next element
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
| 237 | 1 |
def gcd(a: int, b: int) -> int:
    """Return the greatest common divisor of ``a`` and ``b`` (Euclid).

    Restored from the dump, where every local had been collapsed to one
    mangled name, making the loop body a no-op.
    """
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    """Return the modular inverse of ``a`` mod ``m`` via the extended
    Euclidean algorithm.

    Raises:
        ValueError: if ``a`` and ``m`` are not coprime (no inverse exists).
    """
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # (u1, u2, u3) and (v1, v2, v3) track Bézout coefficients.
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        u1, u2, u3, v1, v2, v3 = (
            v1,
            v2,
            v3,
            u1 - q * v1,
            u2 - q * v2,
            u3 - q * v3,
        )
    return u1 % m
| 178 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __magic_name__ ( __a : Any ):  # picklable for multiprocessing
    """Return its argument plus one (trivial map function for the tests).

    Fix: the body returned ``i + 1`` although the parameter had been renamed
    to ``__a`` — that was a guaranteed NameError.
    """
    return __a + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __magic_name__ ( ):
'''simple docstring'''
# The spark joblib backend registers itself; an unknown backend name must
# raise. NOTE(review): the `__a` arguments are mangled — originally the
# map function, iterable and expected exception class. TODO confirm.
with parallel_backend("""spark""" ):
assert ParallelBackendConfig.backend_name == "spark"
UpperCamelCase__ = [1, 2, 3]
with pytest.raises(__a ):
with parallel_backend("""unsupported backend""" ):
map_nested(__a , __a , num_proc=2 )
with pytest.raises(__a ):
with parallel_backend("""unsupported backend""" ):
map_nested(__a , __a , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def __magic_name__ ( __a : Optional[int] ):
'''simple docstring'''
# map_nested under the spark backend must apply the function elementwise to
# lists, dicts and nested containers; each expected_* value is input + 1.
UpperCamelCase__ = [1, 2]
UpperCamelCase__ = {"""a""": 1, """b""": 2}
UpperCamelCase__ = {"""a""": [1, 2], """b""": [3, 4]}
UpperCamelCase__ = {"""a""": {"""1""": 1}, """b""": 2}
UpperCamelCase__ = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
UpperCamelCase__ = [2, 3]
UpperCamelCase__ = {"""a""": 2, """b""": 3}
UpperCamelCase__ = {"""a""": [2, 3], """b""": [4, 5]}
UpperCamelCase__ = {"""a""": {"""1""": 2}, """b""": 3}
UpperCamelCase__ = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
with parallel_backend("""spark""" ):
assert map_nested(__a , __a , num_proc=__a ) == expected_map_nested_sa
assert map_nested(__a , __a , num_proc=__a ) == expected_map_nested_sa
assert map_nested(__a , __a , num_proc=__a ) == expected_map_nested_sa
assert map_nested(__a , __a , num_proc=__a ) == expected_map_nested_sa
assert map_nested(__a , __a , num_proc=__a ) == expected_map_nested_sa
| 178 | 1 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
# Metric description shown in the datasets hub UI.
# NOTE(review): all three constants were mangled to `__snake_case`; the
# decorator below references them as _DESCRIPTION, _KWARGS_DESCRIPTION and
# _CITATION, which were their original names.
__snake_case = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
# Keyword-arguments documentation with usage examples (doctest style).
__snake_case = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
# BibTeX citation for the underlying scikit-learn implementation.
__snake_case = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
# datasets.Metric wrapper around sklearn.metrics.matthews_corrcoef.
class lowercase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
# Declare the metric's input schema (int predictions and references).
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
# Delegate straight to scikit-learn; optional per-sample weights.
return {
"matthews_correlation": float(matthews_corrcoef(UpperCamelCase_ , UpperCamelCase_ , sample_weight=UpperCamelCase_ ) ),
} | 97 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Miller–Rabin-style probabilistic primality test.

    Writes ``n - 1`` as ``d * 2**exp`` with ``d`` odd, then runs ``prec``
    rounds with random bases, using the project's ``bin_exp_mod`` for the
    modular exponentiation.

    Fixes relative to the dump: the function is named ``is_prime_big`` again
    (the __main__ block below calls it by that name), the locals are
    restored, and ``d`` is reduced with integer division (``/=`` produced a
    float exponent).

    Returns:
        bool: True if ``n`` is probably prime, False if definitely composite.
    """
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # n is odd here: factor n - 1 = d * 2**exp with d odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:  # a is a witness: n is composite
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 101 | 0 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    """A single node of the singly linked list."""

    data: int
    next_node: Node | None


class SortedLinkedList:
    """Singly linked list that stores its input in ascending order.

    Restored names: the dump defined both classes as ``UpperCamelCase__``
    while the code referenced ``Node``/``SortedLinkedList``/``SSL``.
    """

    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert largest-first so each prepend keeps the list ascending.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        """Yield the stored integers in ascending order."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Return a new sorted list containing the elements of both inputs."""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 90 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
# Tokenizer tests for PhoBERT (BPE-based Vietnamese tokenizer): a tiny vocab
# and merges file are written to a temp dir in setUp, then tokenization and
# id conversion are checked against hand-computed expectations.
class UpperCamelCase__ ( __lowercase ,unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = PhobertTokenizer
_SCREAMING_SNAKE_CASE : int = False
def lowerCAmelCase (self : Optional[int] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__a : Optional[int] = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
__a : Tuple = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
__a : int = ['''#version: 0.2''', '''l à</w>''']
__a : List[Any] = {'''unk_token''': '''<unk>'''}
# write the toy vocab and merges files the tokenizer will load
__a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(f"{token} {vocab_tokens[token]}\n" )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(snake_case_ ) )
def lowerCAmelCase (self : Union[str, Any] , **snake_case_ : List[str] ):
# factory used by the mixin to build tokenizers against the temp files
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCAmelCase (self : Any , snake_case_ : Dict ):
# input/output pair used by the common tokenizer tests
__a : Union[str, Any] = '''Tôi là VinAI Research'''
__a : int = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
return input_text, output_text
def lowerCAmelCase (self : Optional[Any] ):
__a : Optional[int] = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a : Any = '''Tôi là VinAI Research'''
# expected BPE pieces (@@ marks word-internal subwords)
__a : Union[str, Any] = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
__a : List[str] = tokenizer.tokenize(snake_case_ )
print(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__a : str = tokens + [tokenizer.unk_token]
__a : str = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )
| 90 | 1 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
if isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
return image
elif isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ):
lowercase : Tuple = [image]
if isinstance(image[0] , PIL.Image.Image ):
lowercase : List[str] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
lowercase : Optional[Any] = np.concatenate(SCREAMING_SNAKE_CASE__ , axis=0 )
lowercase : Optional[int] = np.array(SCREAMING_SNAKE_CASE__ ).astype(np.floataa ) / 255.0
lowercase : Tuple = image.transpose(0 , 3 , 1 , 2 )
lowercase : Dict = 2.0 * image - 1.0
lowercase : Optional[Any] = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
elif isinstance(image[0] , torch.Tensor ):
lowercase : Any = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 )
return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between two vectors.

    Accepts numpy arrays or torch tensors; torch inputs are moved to CPU for
    the numpy math and the result is returned on the original device.

    Fixes relative to the dump: ``inputs_are_torch`` is initialized to False
    (the mangled version left it undefined for numpy inputs and called
    ``.device`` on ndarrays), and the input device is remembered so the
    result goes back where it came from.

    Args:
        t: interpolation factor in [0, 1].
        v0, v1: endpoint vectors (same shape).
        DOT_THRESHOLD: above this |cosine|, fall back to plain lerp because
            the vectors are nearly colinear and slerp is numerically unstable.
    """
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # nearly colinear: linear interpolation is safe and stable
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
def spherical_dist_loss(x, y):
    """Squared great-circle (geodesic) distance between unit-normalized rows.

    Both inputs are L2-normalized along the last dim, so the result depends
    only on the angle between the vectors. Restored to the name the in-file
    caller (``cond_fn``) uses.

    Args:
        x, y: tensors of shape (..., D).

    Returns:
        Tensor of per-row squared geodesic distances.
    """
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    # chord length / 2 -> half-angle via arcsin -> squared full angle
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    """Freeze (value=False) or unfreeze (value=True) all parameters of *model*.

    Fix: the mangled version assigned the flag to a throwaway local instead
    of ``param.requires_grad``, making the function a no-op; restored to the
    name the pipeline's ``__init__``/freeze helpers call.
    """
    for param in model.parameters():
        param.requires_grad = value
class __snake_case ( lowerCAmelCase ):
def __init__( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case=None ,snake_case=None ,snake_case=None ,):
'''simple docstring'''
# Register all sub-models (vae, text encoder, CLIP, tokenizer, unet,
# scheduler, feature extractor, optional CoCa captioner) and freeze the
# text encoder and CLIP model — they are only used for guidance.
super().__init__()
self.register_modules(
vae=snake_case ,text_encoder=snake_case ,clip_model=snake_case ,tokenizer=snake_case ,unet=snake_case ,scheduler=snake_case ,feature_extractor=snake_case ,coca_model=snake_case ,coca_tokenizer=snake_case ,coca_transform=snake_case ,)
# feature_extractor.size may be an int or a dict with "shortest_edge"
lowercase : Optional[int] = (
feature_extractor.size
if isinstance(feature_extractor.size ,snake_case )
else feature_extractor.size["""shortest_edge"""]
)
lowercase : Dict = transforms.Normalize(mean=feature_extractor.image_mean ,std=feature_extractor.image_std )
set_requires_grad(self.text_encoder ,snake_case )
set_requires_grad(self.clip_model ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
# Disable attention slicing by delegating with the "off" sentinel.
# NOTE(review): the argument was mangled — originally None. TODO confirm.
self.enable_attention_slicing(snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
# Freeze the VAE (sets requires_grad on all its parameters).
set_requires_grad(self.vae ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
# Unfreeze the VAE (sets requires_grad on all its parameters).
set_requires_grad(self.vae ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
# Freeze the UNet (sets requires_grad on all its parameters).
set_requires_grad(self.unet ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
# Unfreeze the UNet (sets requires_grad on all its parameters).
set_requires_grad(self.unet ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
# img2img-style timestep selection: with strength in [0, 1], skip the
# first (1 - strength) fraction of the scheduler's timesteps.
lowercase : Optional[int] = min(int(num_inference_steps * strength ) ,snake_case )
lowercase : List[Any] = max(num_inference_steps - init_timestep ,0 )
lowercase : int = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case=None ):
'''simple docstring'''
# Encode an image tensor into scaled VAE latents and add scheduler noise
# at the given timestep (img2img initialization).
if not isinstance(snake_case ,torch.Tensor ):
raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(snake_case )}" )
lowercase : List[str] = image.to(device=snake_case ,dtype=snake_case )
if isinstance(snake_case ,snake_case ):
# one generator per batch element for reproducible sampling
lowercase : Optional[int] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case )
]
lowercase : Tuple = torch.cat(snake_case ,dim=0 )
else:
lowercase : List[str] = self.vae.encode(snake_case ).latent_dist.sample(snake_case )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowercase : Any = 0.18_215 * init_latents
lowercase : Dict = init_latents.repeat_interleave(snake_case ,dim=0 )
lowercase : List[str] = randn_tensor(init_latents.shape ,generator=snake_case ,device=snake_case ,dtype=snake_case )
# get latents
lowercase : Optional[int] = self.scheduler.add_noise(snake_case ,snake_case ,snake_case )
lowercase : Optional[Any] = init_latents
return latents
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
# Caption an image with the CoCa model and strip the special tokens /
# trailing punctuation from the decoded text.
lowercase : Dict = self.coca_transform(snake_case ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
lowercase : List[Any] = self.coca_model.generate(transformed_image.to(device=self.device ,dtype=self.coca_model.dtype ) )
lowercase : int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" ,"""""" ).rstrip(""" .,""" )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ):
'''simple docstring'''
# Compute L2-normalized CLIP image embeddings for the guidance loss and
# repeat them once per generated image in the batch.
lowercase : Optional[int] = self.feature_extractor.preprocess(snake_case )
lowercase : Optional[int] = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
lowercase : List[Any] = self.clip_model.get_image_features(snake_case )
lowercase : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 ,dim=-1 ,keepdim=snake_case )
lowercase : Tuple = image_embeddings_clip.repeat_interleave(snake_case ,dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,):
'''simple docstring'''
# CLIP-guidance step: predict x_0 from the current latents, decode it,
# embed it with CLIP, compute the spherical distance to the target CLIP
# embedding, and nudge the noise prediction along the loss gradient.
lowercase : Optional[int] = latents.detach().requires_grad_()
lowercase : Optional[int] = self.scheduler.scale_model_input(snake_case ,snake_case )
# predict the noise residual
lowercase : Optional[int] = self.unet(snake_case ,snake_case ,encoder_hidden_states=snake_case ).sample
if isinstance(self.scheduler ,(PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
lowercase : Optional[int] = self.scheduler.alphas_cumprod[timestep]
lowercase : Union[str, Any] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowercase : Tuple = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
lowercase : int = torch.sqrt(snake_case )
# blend predicted x_0 with the noisy latents by sqrt(alpha_prod_t)
lowercase : Any = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler ,snake_case ):
# sigma-based schedulers (e.g. LMSDiscrete) use a different x_0 estimate
lowercase : Dict = self.scheduler.sigmas[index]
lowercase : Tuple = latents - sigma * noise_pred
else:
raise ValueError(f"scheduler type {type(self.scheduler )} not supported" )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowercase : Optional[Any] = 1 / 0.18_215 * sample
lowercase : Union[str, Any] = self.vae.decode(snake_case ).sample
lowercase : str = (image / 2 + 0.5).clamp(0 ,1 )
lowercase : int = transforms.Resize(self.feature_extractor_size )(snake_case )
lowercase : Tuple = self.normalize(snake_case ).to(latents.dtype )
lowercase : Tuple = self.clip_model.get_image_features(snake_case )
lowercase : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 ,dim=-1 ,keepdim=snake_case )
lowercase : str = spherical_dist_loss(snake_case ,snake_case ).mean() * clip_guidance_scale
# gradient of the CLIP loss w.r.t. the latents drives the guidance
lowercase : List[Any] = -torch.autograd.grad(snake_case ,snake_case )[0]
if isinstance(self.scheduler ,snake_case ):
lowercase : Any = latents.detach() + grads * (sigma**2)
lowercase : Optional[Any] = noise_pred_original
else:
lowercase : int = noise_pred_original - torch.sqrt(snake_case ) * grads
return noise_pred, latents
    @torch.no_grad()
    def __call__( self ,snake_case ,snake_case ,snake_case = None ,snake_case = None ,snake_case = 512 ,snake_case = 512 ,snake_case = 0.6 ,snake_case = 50 ,snake_case = 7.5 ,snake_case = 1 ,snake_case = 0.0 ,snake_case = 100 ,snake_case = None ,snake_case = "pil" ,snake_case = True ,snake_case = 0.8 ,snake_case = 0.1 ,snake_case = 0.1 ,):
        '''Generate an image interpolating a content and a style image/prompt,
        optionally steered by CLIP guidance during denoising.

        NOTE(review): obfuscation damage in this method — every parameter is
        named ``snake_case`` (duplicate parameter names are a SyntaxError) and
        most results below are bound to the throwaway name ``lowercase`` while
        later lines read the intended names (``coca_is_none``,
        ``text_embeddings``, ``timesteps``, ...). The original parameter and
        variable names must be restored before this method can run.
        '''
        # --- argument validation -------------------------------------------------
        if isinstance(snake_case ,snake_case ) and len(snake_case ) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(snake_case )} generators." )
        if height % 8 != 0 or width % 8 != 0:
            # the VAE downsamples by a factor of 8, so height/width must divide evenly
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
        if isinstance(snake_case ,torch.Generator ) and batch_size > 1:
            # a single generator only seeds the first sample; the rest stay unseeded
            lowercase : str = [generator] + [None] * (batch_size - 1)
        # Which CoCa captioning components are missing (needed when a prompt is None).
        lowercase : Union[str, Any] = [
            ("""model""", self.coca_model is None),
            ("""tokenizer""", self.coca_tokenizer is None),
            ("""transform""", self.coca_transform is None),
        ]
        lowercase : Optional[int] = [x[0] for x in coca_is_none if x[1]]
        lowercase : int = """, """.join(snake_case )
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(snake_case ):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
            lowercase : Optional[int] = self.get_image_description(snake_case )
        if style_prompt is None:
            if len(snake_case ):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
            lowercase : str = self.get_image_description(snake_case )
        # get prompt text embeddings for content and style
        lowercase : List[Any] = self.tokenizer(
            snake_case ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,truncation=snake_case ,return_tensors="""pt""" ,)
        lowercase : List[Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
        lowercase : Optional[int] = self.tokenizer(
            snake_case ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,truncation=snake_case ,return_tensors="""pt""" ,)
        lowercase : Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
        # Spherical interpolation between the content and style text embeddings.
        lowercase : Optional[Any] = slerp(snake_case ,snake_case ,snake_case )
        # duplicate text embeddings for each generation per prompt
        lowercase : str = text_embeddings.repeat_interleave(snake_case ,dim=0 )
        # set timesteps
        lowercase : str = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
        lowercase : Tuple = {}
        if accepts_offset:
            lowercase : int = 1
        self.scheduler.set_timesteps(snake_case ,**snake_case )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device )
        lowercase , lowercase : Optional[int] = self.get_timesteps(snake_case ,snake_case ,self.device )
        lowercase : Tuple = timesteps[:1].repeat(snake_case )
        # Preprocess image
        lowercase : str = preprocess(snake_case ,snake_case ,snake_case )
        lowercase : int = self.prepare_latents(
            snake_case ,snake_case ,snake_case ,text_embeddings.dtype ,self.device ,snake_case )
        lowercase : List[Any] = preprocess(snake_case ,snake_case ,snake_case )
        lowercase : Tuple = self.prepare_latents(
            snake_case ,snake_case ,snake_case ,text_embeddings.dtype ,self.device ,snake_case )
        # Interpolate the content and style latents to get the starting latents.
        lowercase : List[str] = slerp(snake_case ,snake_case ,snake_case )
        if clip_guidance_scale > 0:
            # CLIP embeddings of both source images, interpolated like the latents.
            lowercase : Union[str, Any] = self.get_clip_image_embeddings(snake_case ,snake_case )
            lowercase : Optional[int] = self.get_clip_image_embeddings(snake_case ,snake_case )
            lowercase : Optional[int] = slerp(
                snake_case ,snake_case ,snake_case )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        lowercase : str = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            lowercase : List[str] = content_text_input.input_ids.shape[-1]
            lowercase : Optional[Any] = self.tokenizer([""""""] ,padding="""max_length""" ,max_length=snake_case ,return_tensors="""pt""" )
            lowercase : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt
            lowercase : Any = uncond_embeddings.repeat_interleave(snake_case ,dim=0 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            lowercase : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        lowercase : Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        lowercase : Tuple = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                lowercase : str = torch.randn(snake_case ,generator=snake_case ,device="""cpu""" ,dtype=snake_case ).to(
                    self.device )
            else:
                lowercase : Optional[int] = torch.randn(snake_case ,generator=snake_case ,device=self.device ,dtype=snake_case )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
            lowercase : Any = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        lowercase : Dict = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        lowercase : Union[str, Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        lowercase : str = {}
        if accepts_eta:
            lowercase : str = eta
        # check if the scheduler accepts generator
        lowercase : Optional[Any] = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        if accepts_generator:
            lowercase : Tuple = generator
        # --- denoising loop ------------------------------------------------------
        with self.progress_bar(total=snake_case ):
            for i, t in enumerate(snake_case ):
                # expand the latents if we are doing classifier free guidance
                lowercase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
                lowercase : Optional[int] = self.scheduler.scale_model_input(snake_case ,snake_case )
                # predict the noise residual
                lowercase : Dict = self.unet(snake_case ,snake_case ,encoder_hidden_states=snake_case ).sample
                # perform classifier free guidance
                if do_classifier_free_guidance:
                    lowercase , lowercase : str = noise_pred.chunk(2 )
                    lowercase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                # perform clip guidance
                if clip_guidance_scale > 0:
                    lowercase : int = (
                        text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
                    )
                    lowercase , lowercase : Union[str, Any] = self.cond_fn(
                        snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,)
                # compute the previous noisy sample x_t -> x_t-1
                lowercase : Any = self.scheduler.step(snake_case ,snake_case ,snake_case ,**snake_case ).prev_sample
        # --- decode the final latents into an image ------------------------------
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        lowercase : Optional[Any] = 1 / 0.18_215 * latents
        lowercase : Any = self.vae.decode(snake_case ).sample
        lowercase : Optional[Any] = (image / 2 + 0.5).clamp(0 ,1 )
        lowercase : Optional[Any] = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
        if output_type == "pil":
            lowercase : List[str] = self.numpy_to_pil(snake_case )
        if not return_dict:
            return (image, None)
        return StableDiffusionPipelineOutput(images=snake_case ,nsfw_content_detected=snake_case )
def jaro_winkler(stra: str, strb: str) -> float:
    """Return the Jaro-Winkler similarity of two strings (1.0 ~= identical).

    Fixes: the obfuscated version duplicated both parameter names
    (a SyntaxError), bound matches to a throwaway name, and was defined under
    ``__lowercase`` while the __main__ block calls ``jaro_winkler``.
    """

    def get_matched_characters(_stra: str, _strb: str) -> str:
        # Characters of _stra that appear in _strb within the Jaro match window;
        # each matched character in _strb is consumed (blanked) so it can't
        # match twice.
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                _strb = f"{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(stra, strb)
    matching_b = get_matched_characters(strb, stra)
    match_count = len(matching_a)

    # transposition: half the number of matched characters out of order
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters boosts the score (Winkler adjustment)
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)


# Backward-compatible alias for the previous (obfuscated) name.
__lowercase = jaro_winkler
if __name__ == "__main__":
    # Run the module doctests, then print one example similarity score.
    # Fixes: a stray dataset-separator line after this block broke parsing.
    import doctest

    doctest.testmod()
    print(jaro_winkler('hello', 'world'))
'''simple docstring'''
import argparse
import copy
def _UpperCamelCase(path):
    """Parse an edge-list file (``node_a node_b distance`` per line) into an
    adjacency dict mapping node -> list of ``[neighbour, distance]`` pairs.

    Fixes: the obfuscated version assigned new lists to a throwaway name and
    then appended to an unbound ``_list`` (NameError).
    """
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            parts = line.split()
            if not parts:
                continue  # tolerate blank lines
            node_a, node_b, distance = parts[0], parts[1], parts[2]
            # record the edge in both directions (undirected graph)
            if node_a not in dict_of_neighbours:
                dict_of_neighbours[node_a] = [[node_b, distance]]
            else:
                dict_of_neighbours[node_a].append([node_b, distance])
            if node_b not in dict_of_neighbours:
                dict_of_neighbours[node_b] = [[node_a, distance]]
            else:
                dict_of_neighbours[node_b].append([node_a, distance])
    return dict_of_neighbours


# Public alias: the driver in this module calls ``generate_neighbours``.
generate_neighbours = _UpperCamelCase
def _UpperCamelCase(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour starting tour.

    The start node is the first character of the data file. Returns the tour
    (ending back at the start node) and its total distance.

    Fixes: the obfuscated version discarded every intermediate binding
    (``start_node``, ``minim``, ``best_node``, ``visiting`` were all unbound).
    """
    with open(path) as f:
        start_node = f.read(1)
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        # 10000 acts as "infinity"; it is subtracted back out below when the
        # tour closes and no unvisited neighbour exists.
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    # close the tour back at the start node
    first_solution.append(start_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    # replace the sentinel 10000 with the real closing-edge distance
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


# Public alias: the driver in this module calls ``generate_first_solution``.
generate_first_solution = _UpperCamelCase
def _UpperCamelCase(solution, dict_of_neighbours):
    """Return all neighbour tours of ``solution`` obtained by swapping two
    interior nodes, each with its total length appended, sorted by length.

    Fixes: the obfuscated version wrote swap results to a throwaway name
    (leaving ``_tmp``/``distance``/``next_node`` unbound) and its sort lambda
    named its parameter one thing while the body read ``x`` (NameError).
    """
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            # total length of the candidate tour
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    # cheapest candidate tour first
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


# Public alias: the tabu-search driver in this module calls ``find_neighborhood``.
find_neighborhood = _UpperCamelCase
def _UpperCamelCase(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Tabu search: repeatedly move to the best non-tabu neighbour tour,
    remembering the last ``size`` node exchanges to avoid cycling.

    Returns ``(best_solution_ever, best_cost)``.

    Fixes: the obfuscated version discarded every state binding (``count``,
    ``solution``, ``tabu_list``, ``best_solution`` ... were all unbound).
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            # locate the first position where the candidate differs from the
            # current solution: that pair is the exchange that produced it
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]  # drop the appended tour length
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # exchange is tabu: fall back to the next-best neighbour
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)  # expire the oldest tabu move
        count = count + 1
    return best_solution_ever, best_cost


# Public alias: the driver in this module calls ``tabu_search``.
tabu_search = _UpperCamelCase
def _UpperCamelCase(args=None):
    """Driver: parse the data file, build a greedy first tour, then improve it
    with tabu search and print the result.

    Fixes: the obfuscated version ignored its parameter and read an unbound
    ``args``, and passed the wrong values through to ``tabu_search``.
    """
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )
    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


# Public alias: the __main__ block calls ``main``.
main = _UpperCamelCase
if __name__ == "__main__":
    # Fixes: the parser was assigned to ``__A`` while every following line read
    # ``parser`` (NameError), and the final call targeted an undefined ``main``.
    parser = argparse.ArgumentParser(description='Tabu Search')
    parser.add_argument(
        '-f',
        '--File',
        type=str,
        help='Path to the file containing the data',
        required=True,
    )
    parser.add_argument(
        '-i',
        '--Iterations',
        type=int,
        help='How many iterations the algorithm should perform',
        required=True,
    )
    parser.add_argument(
        '-s', '--Size', type=int, help='Size of the tabu list', required=True
    )
    # Pass the arguments to main method (defined above as _UpperCamelCase)
    _UpperCamelCase(parser.parse_args())
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _snake_case(a__):
    """Combines a BridgeTower image processor and a Roberta tokenizer into a
    single processor.

    NOTE(review): the base class ``a__`` is ProcessorMixin under its obfuscated
    import alias — confirm against the module imports.

    Fixes: the obfuscated version gave all three class attributes the same
    name (only the last survived), duplicated every ``__call__`` parameter
    name (a SyntaxError), read an unbound ``encoding``, and carried stray
    dataset-separator tokens on its last line.
    """

    # ProcessorMixin wiring: attribute names and the classes they must hold.
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''BridgeTowerImageProcessor'''
    tokenizer_class = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        """Tokenize ``text`` and preprocess ``images``; returns one encoding
        holding both the text fields and pixel_values/pixel_mask."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, de-duplicated
        # while preserving order.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
def lowerCamelCase__(_A):
    """Sort a list in place with circle sort and return it.

    Fixes: the obfuscated version discarded every binding (``swapped``,
    ``left``, ``right``, ``mid``, ``is_not_sorted`` were all unbound).
    """
    if len(_A) < 2:
        return _A

    def circle_sort_util(collection, low, high):
        """One recursive circle pass over collection[low:high+1]; returns True
        if any pair was swapped."""
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        # compare and swap mirrored pairs moving inwards
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        # odd-length segment: compare the centre with its right neighbour
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    # repeat full passes until one completes with no swaps
    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(_A, 0, len(_A) - 1)
    return _A
if __name__ == "__main__":
    # Fixes: inputs were bound to a throwaway name so ``user_input``/``unsorted``
    # were unbound, the call targeted an undefined ``circle_sort`` (the sort is
    # defined above as ``lowerCamelCase__``), and a stray dataset-separator line
    # followed this block.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(lowerCamelCase__(unsorted))
import numpy
class UpperCAmelCase:
    """A fully-connected neural network with two hidden layers (4 and 3 nodes)
    trained by plain gradient descent with sigmoid activations.

    Fixes: the obfuscated version lost every ``self.*`` attribute assignment
    (all bound to a throwaway name), gave all four methods the same name so
    only the last survived, and called undefined module-level ``sigmoid`` /
    ``sigmoid_derivative`` helpers (now private static methods so the class is
    self-contained).
    """

    def __init__(self, input_array, output_array):
        # Training inputs, one sample per row.
        self.input_array = input_array
        # Random initial weights are assigned:
        # input layer -> first hidden layer (4 nodes)
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # first hidden layer (4 nodes) -> second hidden layer (3 nodes)
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # second hidden layer (3 nodes) -> output layer (1 node)
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values; zeros until the network is trained.
        self.predicted_output = numpy.zeros(output_array.shape)

    @staticmethod
    def _sigmoid(value):
        # Logistic activation.
        return 1 / (1 + numpy.exp(-value))

    @staticmethod
    def _sigmoid_derivative(value):
        # Derivative of the sigmoid, expressed in terms of its output value.
        return value * (1 - value)

    def feedforward(self):
        """Propagate the stored inputs through the network; returns the
        activations of the output layer."""
        self.layer_between_input_and_first_hidden_layer = self._sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = self._sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = self._sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self):
        """Update all three weight matrices from the error of the most recent
        feedforward pass (gradient of the squared error)."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * self._sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * self._sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * self._sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * self._sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * self._sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * self._sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output, iterations, give_loss):
        """Run feedforward + backprop for ``iterations`` epochs; optionally
        print the mean-squared loss each iteration."""
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr):
        """Classify a single input vector; returns 1 when the network output
        exceeds the 0.6 threshold, else 0."""
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = self._sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = self._sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = self._sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def lowerCamelCase__(value):
    """Logistic sigmoid activation.

    Fix: the body read ``value`` while the parameter was named ``_A``
    (NameError); the parameter is renamed to match (callers pass positionally).
    """
    return 1 / (1 + numpy.exp(-value))
def lowerCamelCase__(value):
    """Derivative of the sigmoid, given the sigmoid's output ``value``.

    Fix: the body read ``value`` while the parameter was named ``_A``
    (NameError); the parameter is renamed to match (callers pass positionally).
    """
    return (value) * (1 - (value))
def lowerCamelCase__():
    """Train the network on a 3-bit truth table and classify the input (1, 1, 1).

    Fixes: the obfuscated version used the invalid dtype ``numpy.floataa``
    (float64 restored), instantiated an undefined ``TwoHiddenLayerNeuralNetwork``
    (this module's class is ``UpperCAmelCase``), discarded every binding, and
    passed an unbound value as ``give_loss``.
    """
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = UpperCAmelCase(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
    # Fixes: the original called an undefined ``example`` (the demo above is
    # defined as ``lowerCamelCase__``) and trailing dataset-separator tokens
    # broke parsing.
    lowerCamelCase__()
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
# Module logger (transformers' logging wrapper).
logger = logging.get_logger(__name__)

# Fixes: all four constants below were assigned to the same throwaway name
# while the tokenizer class reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP
# / PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES (NameError).
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""",
        """facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """facebook/esm2_t6_8M_UR50D""": 1_024,
    """facebook/esm2_t12_35M_UR50D""": 1_024,
}
def __SCREAMING_SNAKE_CASE(vocab_file):
    """Read a vocab file and return its lines stripped of surrounding whitespace.

    Fix: the read result was bound to a throwaway name, leaving ``lines``
    unbound (NameError).
    """
    with open(vocab_file, """r""") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


# Public alias: the tokenizer class below calls ``load_vocab_file``.
load_vocab_file = __SCREAMING_SNAKE_CASE
class UpperCamelCase(snake_case):
    """An ESM protein tokenizer: plain whitespace tokenization against a fixed
    vocabulary file.

    NOTE(review): the base class ``snake_case`` is PreTrainedTokenizer under an
    obfuscated import alias — confirm against the module imports.

    Fixes: the obfuscated version duplicated every ``__init__`` parameter name
    (a SyntaxError), gave all methods the same name so only the last survived,
    lost every ``self.*`` assignment, and carried stray separator tokens on its
    final line.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        # every vocab entry is an atomic token; register them with the trie
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index):
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token):
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        # protein sequences are pre-spaced: split on whitespace only
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token):
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index):
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Prepend CLS and append EOS around one or two token sequences."""
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("""Cannot tokenize multiple sequences when EOS token is not set!""")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a 1/0 mask marking special tokens in the built sequence."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""")
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        """Write the vocabulary, one token per line; returns the file path."""
        vocab_file = os.path.join(save_directory, (filename_prefix + """-""" if filename_prefix else """""") + """vocab.txt""")
        with open(vocab_file, """w""") as f:
            f.write("""\n""".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self):
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False):
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ):
_lowercase : str = tempfile.mkdtemp()
# fmt: off
_lowercase : List[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_lowercase : Optional[int] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
_lowercase : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
_lowercase : Optional[int] = {"""unk_token""": """<unk>"""}
_lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
_lowercase : Dict = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
_lowercase : List[Any] = os.path.join(self.tmpdirname ,UpperCAmelCase_ )
with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp:
json.dump(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
_lowercase : Tuple = [Image.fromarray(np.moveaxis(UpperCAmelCase_ ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
    def lowerCamelCase__ ( self ):
        # Round-trips a slow- and a fast-tokenizer CLIPProcessor through
        # save_pretrained/from_pretrained and compares vocabs and the image
        # processor config.
        # NOTE(review): obfuscation damage — this should be named
        # test_save_load_pretrained_default for unittest discovery, and the
        # results below are bound to the throwaway name ``_lowercase`` while
        # later lines read tokenizer_slow / processor_slow / tokenizer_fast /
        # processor_fast / image_processor; restore the real names before
        # running.
        _lowercase : Union[str, Any] = self.get_tokenizer()
        _lowercase : List[Any] = self.get_rust_tokenizer()
        _lowercase : List[Any] = self.get_image_processor()
        _lowercase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
        processor_slow.save_pretrained(self.tmpdirname )
        _lowercase : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=UpperCAmelCase_ )
        _lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
        processor_fast.save_pretrained(self.tmpdirname )
        _lowercase : List[str] = CLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer ,UpperCAmelCase_ )
        self.assertIsInstance(processor_fast.tokenizer ,UpperCAmelCase_ )
        self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor ,UpperCAmelCase_ )
        self.assertIsInstance(processor_fast.image_processor ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : str = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowercase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
_lowercase : Optional[int] = self.get_image_processor(do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
_lowercase : int = CLIPProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : Optional[int] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : int = self.prepare_image_inputs()
_lowercase : str = image_processor(UpperCAmelCase_ ,return_tensors="""np""" )
_lowercase : int = processor(images=UpperCAmelCase_ ,return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : Optional[Any] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : List[Any] = """lower newer"""
_lowercase : Any = processor(text=UpperCAmelCase_ )
_lowercase : Union[str, Any] = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : str = """lower newer"""
_lowercase : List[Any] = self.prepare_image_inputs()
_lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def lowerCamelCase__ ( self ):
_lowercase : Dict = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowercase : int = processor.batch_decode(UpperCAmelCase_ )
_lowercase : Tuple = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : Optional[Any] = """lower newer"""
_lowercase : Any = self.prepare_image_inputs()
_lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
| 336 | 1 |
lowerCamelCase : str = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 124 | import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class lowerCamelCase :
"""simple docstring"""
def __init__( self : List[Any] , __magic_name__ : Any , __magic_name__ : List[Any]=13 , __magic_name__ : List[Any]=2 , __magic_name__ : Tuple=24 , __magic_name__ : List[str]=16 , __magic_name__ : Dict=True , __magic_name__ : List[Any]=True , __magic_name__ : Optional[int]=32 , __magic_name__ : Tuple=5 , __magic_name__ : int=4 , __magic_name__ : Tuple=37 , __magic_name__ : List[str]="gelu" , __magic_name__ : Tuple=0.1 , __magic_name__ : Tuple=0.1 , __magic_name__ : Union[str, Any]=10 , __magic_name__ : Tuple=0.02 , __magic_name__ : Tuple=None , __magic_name__ : Any=2 , __magic_name__ : Dict=2 , ) -> int:
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = max_length
SCREAMING_SNAKE_CASE_ = num_mel_bins
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = frequency_stride
SCREAMING_SNAKE_CASE_ = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
SCREAMING_SNAKE_CASE_ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
SCREAMING_SNAKE_CASE_ = (self.max_length - self.patch_size) // self.time_stride + 1
SCREAMING_SNAKE_CASE_ = frequency_out_dimension * time_out_dimension
SCREAMING_SNAKE_CASE_ = num_patches + 2
def __A ( self : Any ) -> Any:
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, input_values, labels
def __A ( self : Any ) -> Dict:
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def __A ( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : str , __magic_name__ : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = ASTModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : Tuple ) -> str:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE_ = {"input_values": input_values}
return config, inputs_dict
@require_torch
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __A ( self : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Tuple ) -> Tuple:
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def __A ( self : Optional[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = ASTModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def __A ( self : Union[str, Any] ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def __A ( self : Optional[Any] ) -> Tuple:
pass
def __A ( self : int ) -> int:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def __A ( self : List[Any] ) -> str:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ["input_values"]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def __A ( self : int ) -> Any:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
@slow
def __A ( self : int ) -> int:
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = ASTModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def a__ ( ):
SCREAMING_SNAKE_CASE_ = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torchaudio.load(__UpperCamelCase )
return audio, sampling_rate
@require_torch
@require_torchaudio
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self : List[Any] ) -> List[Any]:
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
def __A ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.default_feature_extractor
SCREAMING_SNAKE_CASE_ = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.default_feature_extractor
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = prepare_audio()
SCREAMING_SNAKE_CASE_ = audio.squeeze().numpy()
SCREAMING_SNAKE_CASE_ = feature_extractor(__magic_name__ , sampling_rate=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ )
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 ) )
| 118 | 0 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCAmelCase =logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = True ,) -> Optional[int]:
A = [file for file in os.listdir(lowerCamelCase_ ) if os.path.isfile(os.path.join(lowerCamelCase_ ,lowerCamelCase_ ) )]
if identifier is not None:
A = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
for n_ in n_identifier:
A = [file for file in files if n_ not in file]
else:
A = [file for file in files if n_identifier not in file]
A = ignore_files or []
ignore_files.append("""__init__.py""" )
A = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" ,lowerCamelCase_ )
if only_modules:
A = file.split(""".""" )[0]
try:
A = getattr(lowerCamelCase_ ,lowerCamelCase_ )
A = doctest.DocTestSuite(lowerCamelCase_ )
A = unittest.TextTestRunner().run(lowerCamelCase_ )
self.assertIs(len(result.failures ) ,0 )
except AttributeError:
logger.info(f'{module_identifier} is not a module.' )
else:
A = doctest.testfile(str("""..""" / directory / file ) ,optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed ,0 )
def UpperCamelCase__ ( self ) -> Dict:
A = Path("""src/transformers""" )
A = """modeling"""
A = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(lowerCamelCase_ ,identifier=lowerCamelCase_ ,ignore_files=lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> Optional[Any]:
A = Path("""src/transformers""" )
A = """tokenization"""
self.analyze_directory(lowerCamelCase_ ,identifier=lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> Tuple:
A = Path("""src/transformers""" )
A = """configuration"""
self.analyze_directory(lowerCamelCase_ ,identifier=lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
A = Path("""src/transformers""" )
A = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(lowerCamelCase_ ,n_identifier=lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> int:
A = Path("""docs/source""" )
A = ["""favicon.ico"""]
self.analyze_directory(lowerCamelCase_ ,ignore_files=lowerCamelCase_ ,only_modules=lowerCamelCase_ )
| 77 |
"""simple docstring"""
def _A ( _a : Optional[int] ):
"""simple docstring"""
A = []
A = set({"""(""", """[""", """{"""} )
A = set({""")""", """]""", """}"""} )
A = {"""{""": """}""", """[""": """]""", """(""": """)"""}
for i in range(len(_a ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(_a ) == 0 or (len(_a ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(_a ) == 0
def _A ( ):
"""simple docstring"""
A = input("""Enter sequence of brackets: """ )
if is_balanced(_a ):
print(_a , """is balanced""" )
else:
print(_a , """is not balanced""" )
if __name__ == "__main__":
main()
| 77 | 1 |
def a_ ( lowerCAmelCase_ : str ):
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(lowerCAmelCase_ ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('doctest').testmod()
| 284 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def a_ ( lowerCAmelCase_ : Optional[int] ):
__lowerCAmelCase = filter(lambda lowerCAmelCase_ : p.requires_grad, model.parameters() )
__lowerCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_snake_case : Dict = logging.getLogger(__name__)
def a_ ( lowerCAmelCase_ : Optional[int], lowerCAmelCase_ : Optional[int] ):
if metric == "rouge2":
__lowerCAmelCase = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
__lowerCAmelCase = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
__lowerCAmelCase = '{val_avg_em:.4f}-{step_count}'
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
' function.' )
__lowerCAmelCase = ModelCheckpoint(
dirpath=lowerCAmelCase_, filename=lowerCAmelCase_, monitor=F"""val_{metric}""", mode='max', save_top_k=3, every_n_epochs=1, )
return checkpoint_callback
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : Any ):
return EarlyStopping(
monitor=F"""val_{metric}""", mode='min' if 'loss' in metric else 'max', patience=lowerCAmelCase_, verbose=lowerCAmelCase_, )
class _UpperCAmelCase ( pl.Callback ):
"""simple docstring"""
def lowercase ( self : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int ) -> Any:
__lowerCAmelCase = {f"""lr_group_{i}""": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lowerCAmelCase_ )
@rank_zero_only
def lowercase ( self : Optional[int] , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : pl.LightningModule , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any]=True ) -> None:
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
__lowerCAmelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
__lowerCAmelCase = Path(pl_module.hparams.output_dir )
if type_path == "test":
__lowerCAmelCase = od / 'test_results.txt'
__lowerCAmelCase = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__lowerCAmelCase = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
__lowerCAmelCase = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=lowerCAmelCase_ )
generations_file.parent.mkdir(exist_ok=lowerCAmelCase_ )
with open(lowerCAmelCase_ , 'a+' ) as writer:
for key in sorted(lowerCAmelCase_ ):
if key in ["log", "progress_bar", "preds"]:
continue
__lowerCAmelCase = metrics[key]
if isinstance(lowerCAmelCase_ , torch.Tensor ):
__lowerCAmelCase = val.item()
__lowerCAmelCase = f"""{key}: {val:.6f}\n"""
writer.write(lowerCAmelCase_ )
if not save_generations:
return
if "preds" in metrics:
__lowerCAmelCase = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(lowerCAmelCase_ )
@rank_zero_only
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] ) -> Dict:
try:
__lowerCAmelCase = pl_module.model.model.num_parameters()
except AttributeError:
__lowerCAmelCase = pl_module.model.num_parameters()
__lowerCAmelCase = count_trainable_parameters(lowerCAmelCase_ )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6} )
@rank_zero_only
def lowercase ( self : int , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : pl.LightningModule ) -> Any:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(lowerCAmelCase_ , lowerCAmelCase_ , 'test' )
@rank_zero_only
def lowercase ( self : List[Any] , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : Any ) -> int:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 284 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : int = 'lilt'
def __init__(self : List[Any] , __UpperCAmelCase : Dict=3_0_5_2_2 , __UpperCAmelCase : Dict=7_6_8 , __UpperCAmelCase : Union[str, Any]=1_2 , __UpperCAmelCase : List[Any]=1_2 , __UpperCAmelCase : str=3_0_7_2 , __UpperCAmelCase : str="gelu" , __UpperCAmelCase : List[Any]=0.1 , __UpperCAmelCase : Tuple=0.1 , __UpperCAmelCase : Optional[int]=5_1_2 , __UpperCAmelCase : str=2 , __UpperCAmelCase : Union[str, Any]=0.02 , __UpperCAmelCase : int=1E-12 , __UpperCAmelCase : Union[str, Any]=0 , __UpperCAmelCase : Tuple="absolute" , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : int=4 , __UpperCAmelCase : Optional[int]=1_0_2_4 , **__UpperCAmelCase : List[Any] , ) -> str:
"""simple docstring"""
super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase )
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = position_embedding_type
UpperCAmelCase__ = classifier_dropout
UpperCAmelCase__ = channel_shrink_ratio
UpperCAmelCase__ = max_ad_position_embeddings
| 354 | import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = '▁'
UpperCamelCase__ = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : int = BigBirdTokenizer
__UpperCAmelCase : Optional[int] = BigBirdTokenizerFast
__UpperCAmelCase : Union[str, Any] = True
__UpperCAmelCase : List[Any] = True
def lowercase_ (self : Dict ) -> List[str]:
"""simple docstring"""
super().setUp()
UpperCAmelCase__ = self.tokenizer_class(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ (self : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = "<s>"
UpperCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def lowercase_ (self : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "[MASK]" )
self.assertEqual(len(__UpperCAmelCase ) , 1_0_0_4 )
def lowercase_ (self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def lowercase_ (self : Union[str, Any] ) -> Any:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = self.get_rust_tokenizer()
UpperCAmelCase__ = "I was born in 92000, and this is falsé."
UpperCAmelCase__ = tokenizer.tokenize(__UpperCAmelCase )
UpperCAmelCase__ = rust_tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
UpperCAmelCase__ = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = self.get_rust_tokenizer()
UpperCAmelCase__ = tokenizer.encode(__UpperCAmelCase )
UpperCAmelCase__ = rust_tokenizer.encode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowercase_ (self : str ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = BigBirdTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
UpperCAmelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
UpperCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def lowercase_ (self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
def lowercase_ (self : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = "Hello World!"
UpperCAmelCase__ = [6_5, 1_8_5_3_6, 2_2_6_0, 1_0_1, 6_6]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
def lowercase_ (self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
# fmt: off
UpperCAmelCase__ = [6_5, 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, 6_6] # noqa: E231
# fmt: on
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@require_torch
@slow
def lowercase_ (self : List[str] ) -> int:
"""simple docstring"""
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase__ = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
UpperCAmelCase__ = " ".join(__UpperCAmelCase )
UpperCAmelCase__ = self.big_tokenizer.encode_plus(__UpperCAmelCase , return_tensors="pt" , return_token_type_ids=__UpperCAmelCase )
UpperCAmelCase__ = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=__UpperCAmelCase )
UpperCAmelCase__ = BigBirdConfig(attention_type="original_full" )
UpperCAmelCase__ = BigBirdModel(__UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__UpperCAmelCase )
model(**__UpperCAmelCase )
@slow
def lowercase_ (self : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
UpperCAmelCase__ = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def lowercase_ (self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = {"input_ids": [[6_5, 3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4, 6_6], [6_5, 4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6_5, 4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 143 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
# U-Net configs for the released consistency-model checkpoints.
# The names below must match their use in the __main__ conversion logic later
# in this file (TEST_UNET_CONFIG, IMAGENET_64_UNET_CONFIG, ...).
TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,  # LSUN checkpoints are unconditional
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# Scheduler configs: "cd" checkpoints = consistency distillation,
# "ct" checkpoints = consistency training.
CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def strabool(v):
    """
    argparse-friendly string-to-bool converter.

    Accepts real booleans unchanged; maps common truthy/falsy strings
    (case-insensitively) to True/False.

    Raises:
        argparse.ArgumentTypeError: if the string is not a recognized boolean.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    """
    Copy one ResNet block from an openai/consistency_models state dict into
    `new_checkpoint` under the diffusers parameter names.

    Args:
        checkpoint: source state dict (consistency_models layout).
        new_checkpoint: destination state dict (diffusers layout); mutated in place.
        old_prefix: key prefix of this block inside `checkpoint`.
        new_prefix: key prefix of this block inside `new_checkpoint`.
        has_skip: whether this block has a skip-connection (shortcut) conv.

    Returns:
        `new_checkpoint`, for chaining.
    """
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def UpperCamelCase_ ( snake_case_ : Any , snake_case_ : List[str] , snake_case_ : int , snake_case_ : str , snake_case_ : int=None ) -> List[Any]:
'''simple docstring'''
__lowerCAmelCase = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
__lowerCAmelCase = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
__lowerCAmelCase = checkpoint[f"""{old_prefix}.norm.weight"""]
__lowerCAmelCase = checkpoint[f"""{old_prefix}.norm.bias"""]
__lowerCAmelCase = weight_q.squeeze(-1 ).squeeze(-1 )
__lowerCAmelCase = bias_q.squeeze(-1 ).squeeze(-1 )
__lowerCAmelCase = weight_k.squeeze(-1 ).squeeze(-1 )
__lowerCAmelCase = bias_k.squeeze(-1 ).squeeze(-1 )
__lowerCAmelCase = weight_v.squeeze(-1 ).squeeze(-1 )
__lowerCAmelCase = bias_v.squeeze(-1 ).squeeze(-1 )
__lowerCAmelCase = (
checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
__lowerCAmelCase = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    """
    Convert an openai/consistency_models UNet `.pt` checkpoint into a state
    dict matching diffusers' `UNet2DModel` for the given `unet_config`.

    Args:
        checkpoint_path: path to the original `.pt` file.
        unet_config: one of the *_UNET_CONFIG dicts defined above.

    Returns:
        A new state dict keyed with diffusers parameter names.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    # input_blocks.0 is conv_in, so the first down block starts at index 1.
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        # A channel change at the start of a block requires a skip conv on its first resnet.
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            # Every down block except the last ends with a resnet downsampler.
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            # Up blocks have one extra resnet, and each resnet concatenates a
            # skip connection, so has_skip is always True here.
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                # In attention up blocks the upsampler sits at slot .2 (after the attention).
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()

    # --class_cond arrives as a string; normalize it to a real bool.
    args.class_cond = strabool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 229 |
def solution(limit: int = 50000000) -> int:
    """
    Project Euler problem 87: count the numbers below `limit` that can be
    written as prime^2 + prime^3 + prime^4.

    A set is used so each expressible number is counted once even if it has
    several representations.
    """
    ret = set()
    # The largest prime whose square can appear: p^2 <= limit - 2^3 - 2^4 = limit - 24.
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Odd-only sieve of Eratosthenes up to prime_square_limit.
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    # Sorted order is required for the early `break`s below to be valid.
    sorted_primes = sorted(primes)

    for prime_a in sorted_primes:
        square = prime_a * prime_a
        for prime_b in sorted_primes:
            cube = prime_b * prime_b * prime_b
            # Smallest possible fourth power is 2^4 = 16.
            if square + cube >= limit - 16:
                break
            for prime_c in sorted_primes:
                tetr = prime_c * prime_c * prime_c * prime_c
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


if __name__ == "__main__":
    print(f"{solution() = }")
| 216 | 0 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
# Both flags were assigned to the same mangled name, clobbering one another;
# restore the two distinct TF training toggles.
USE_XLA = False
USE_AMP = False
def _A(args):
    """
    Factory for argparse's `set_defaults(func=...)`: build a `TrainCommand`
    from the parsed CLI namespace.
    """
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    """CLI command that fine-tunes a text-classification pipeline on a csv dataset."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command's `train` sub-parser and its arguments.

        Args:
            parser: Root parser to register command-specific arguments on.
        """
        train_parser = parser.add_parser('train', help='CLI tool to train a model on a task.')
        train_parser.add_argument(
            '--train_data',
            type=str,
            required=True,
            help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.',
        )
        train_parser.add_argument(
            '--column_label', type=int, default=0, help='Column of the dataset csv file with example labels.')
        train_parser.add_argument(
            '--column_text', type=int, default=1, help='Column of the dataset csv file with example texts.')
        train_parser.add_argument(
            '--column_id', type=int, default=2, help='Column of the dataset csv file with example ids.')
        train_parser.add_argument(
            '--skip_first_row', action='store_true', help='Skip the first row of the csv file (headers).')
        train_parser.add_argument('--validation_data', type=str, default='', help='path to validation dataset.')
        train_parser.add_argument(
            '--validation_split',
            type=float,
            default=0.1,
            help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.',
        )
        train_parser.add_argument('--output', type=str, default='./', help='path to saved the trained model.')
        train_parser.add_argument(
            '--task', type=str, default='text_classification', help='Task to train the model on.')
        train_parser.add_argument(
            '--model', type=str, default='bert-base-uncased', help='Model\'s name or path to stored model.')
        train_parser.add_argument('--train_batch_size', type=int, default=32, help='Batch size for training.')
        train_parser.add_argument('--valid_batch_size', type=int, default=64, help='Batch size for validation.')
        train_parser.add_argument('--learning_rate', type=float, default=3e-5, help='Learning rate.')
        train_parser.add_argument('--adam_epsilon', type=float, default=1e-08, help='Epsilon for Adam optimizer.')
        # `_A` is the module-level factory that builds a TrainCommand from parsed args.
        train_parser.set_defaults(func=_A)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger('transformers-cli/training')

        # Prefer TF when available, otherwise fall back to PyTorch.
        self.framework = 'tf' if is_tf_available() else 'torch'

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"""Loading {args.task} pipeline for {args.model}""")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"""Loading dataset from {args.train_data}""")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )

        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"""Loading validation dataset from {args.validation_data}""")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        """Dispatch to the framework-specific training loop."""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        # PyTorch training is not implemented for this command yet.
        raise NotImplementedError

    def run_tf(self):
        """Train the TF pipeline on the loaded dataset and save it to `self.output`."""
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
| 371 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of released CodeGen checkpoints to their hosted config files.
CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    """
    Configuration class for CodeGen models. Stores the architecture
    hyper-parameters; defaults match Salesforce/codegen-16B-*.
    """

    model_type = "codegen"
    # Generic config attribute names -> CodeGen-specific names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for CodeGen, with optional past-key-values support."""

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, 'pad_token_id', None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the model inputs for the ONNX exporter."""
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs (including past_key_values when enabled) for export."""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            # Extend the mask to cover the (dummy) past positions.
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 48 | 0 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
# Both flags were assigned to the same mangled name, clobbering one another;
# restore the two distinct TF training toggles.
USE_XLA = False
USE_AMP = False
def SCREAMING_SNAKE_CASE(args) -> "TrainCommand":
    """
    Factory for argparse's `set_defaults(func=...)`: build a `TrainCommand`
    from the parsed CLI namespace. (Return annotation fixed: this returns a
    TrainCommand, not an int.)
    """
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    """CLI command that fine-tunes a text-classification pipeline on a csv dataset."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command's `train` sub-parser and its arguments.

        Args:
            parser: Root parser to register command-specific arguments on.
        """
        train_parser = parser.add_parser('train', help='CLI tool to train a model on a task.')
        train_parser.add_argument(
            '--train_data',
            type=str,
            required=True,
            help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.',
        )
        train_parser.add_argument(
            '--column_label', type=int, default=0, help='Column of the dataset csv file with example labels.' )
        train_parser.add_argument(
            '--column_text', type=int, default=1, help='Column of the dataset csv file with example texts.' )
        train_parser.add_argument(
            '--column_id', type=int, default=2, help='Column of the dataset csv file with example ids.' )
        train_parser.add_argument(
            '--skip_first_row', action='store_true', help='Skip the first row of the csv file (headers).' )
        train_parser.add_argument('--validation_data', type=str, default='', help='path to validation dataset.' )
        train_parser.add_argument(
            '--validation_split',
            type=float,
            default=0.1,
            help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.',
        )
        train_parser.add_argument('--output', type=str, default='./', help='path to saved the trained model.' )
        train_parser.add_argument(
            '--task', type=str, default='text_classification', help='Task to train the model on.' )
        train_parser.add_argument(
            '--model', type=str, default='bert-base-uncased', help='Model\'s name or path to stored model.' )
        train_parser.add_argument('--train_batch_size', type=int, default=32, help='Batch size for training.' )
        train_parser.add_argument('--valid_batch_size', type=int, default=64, help='Batch size for validation.' )
        train_parser.add_argument('--learning_rate', type=float, default=3e-5, help='Learning rate.' )
        train_parser.add_argument('--adam_epsilon', type=float, default=1e-08, help='Epsilon for Adam optimizer.' )
        # `SCREAMING_SNAKE_CASE` is the module-level factory that builds a TrainCommand.
        train_parser.set_defaults(func=SCREAMING_SNAKE_CASE)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger('transformers-cli/training')

        # Prefer TF when available, otherwise fall back to PyTorch.
        self.framework = 'tf' if is_tf_available() else 'torch'

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(F"""Loading {args.task} pipeline for {args.model}""")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(F"""Loading dataset from {args.train_data}""")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )

        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(F"""Loading validation dataset from {args.validation_data}""")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        """Dispatch to the framework-specific training loop."""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        # PyTorch training is not implemented for this command yet.
        raise NotImplementedError

    def run_tf(self):
        """Train the TF pipeline on the loaded dataset and save it to `self.output`."""
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
| 50 |
from math import pi, sqrt
def gamma(num: float) -> float:
    """
    Recursively compute the Gamma function for integer and half-integer
    arguments, using gamma(n) = (n - 1) * gamma(n - 1) with the base cases
    gamma(1) = 1 and gamma(0.5) = sqrt(pi).

    Raises:
        ValueError: if num <= 0 (outside the supported domain).
        OverflowError: if num > 171.5 (result exceeds a double).
        NotImplementedError: if num is not an integer or half-integer.
    """
    if num <= 0:
        raise ValueError("""math domain error""" )
    if num > 171.5:
        raise OverflowError("""math range error""" )
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""" )
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def test_gamma() -> None:
    """Sanity checks for gamma() at known exact values."""
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Interactive loop: entering 0 (falsy) exits.
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(F'gamma({num}) = {gamma(num)}')
        print("\nEnter 0 to exit...")
| 228 | 0 |
'''simple docstring'''
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """
    Approximate sine via the Maclaurin series x - x^3/3! + x^5/5! - ...,
    using `accuracy` correction terms and rounding the result to
    `rounded_values_count` decimal places.

    Args:
        angle_in_degrees: angle in degrees (any magnitude; reduced mod 360).
        accuracy: number of series terms beyond the first.
        rounded_values_count: decimal places in the returned value.
    """
    # Simplify the angle to be between -360 and 360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__('doctest').testmod()
| 18 | '''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')

require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')

# All config classes (and their model types) supporting image classification.
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    """
    Load an image from `path` and convert it to RGB (drops alpha/palette).

    Opens the file object (not the path again) so the handle is owned by
    the `with` block.
    """
    with open(path, '''rb''' ) as f:
        im = Image.open(f)
        return im.convert('''RGB''' )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}
    )
    train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'})
    validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={'help': 'Percent to split off of train for validation.'}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )

    def __post_init__(self):
        # Validate that at least one data source was provided.
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                '''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to fine-tune from.

    NOTE(review): restored from a mangled original whose fields were all named
    ``__lowercase`` (mutually shadowing) and whose ``model_type`` help string
    tried to ``', '.join`` a function. Field names are inferred from the
    ``help`` metadata and from the ``model_args.*`` reads in ``main``; the
    class is renamed to the name ``main`` already expects (``ModelArguments``).
    """

    model_name_or_path: str = field(
        default='google/vit-base-patch16-224-in21k',
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'},
    )
    model_type: Optional[str] = field(
        default=None,
        # NOTE(review): `__snake_case` is, at class-definition time, the
        # module-level tuple of model-type strings (its last binding above);
        # originally this was a distinct MODEL_TYPES constant — TODO restore.
        metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(__snake_case)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'}
    )
    model_revision: str = field(
        default='main',
        metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'},
    )
def collate_fn(examples):
    """Collate a list of ``{"pixel_values", "labels"}`` examples into one batch.

    BUG FIX: the original named its parameter ``_UpperCamelCase`` while the
    body iterated the unbound name ``examples`` (NameError), and was itself
    named ``_UpperCAmelCase`` — a name rebound twice more below, making this
    collator unreachable. Renamed to the conventional ``collate_fn``.

    Returns a dict with a stacked ``pixel_values`` tensor of shape
    (batch, C, H, W) and a 1-D ``labels`` tensor.
    """
    pixel_values = torch.stack([example["pixel_values"] for example in examples] )
    labels = torch.tensor([example["labels"] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def _UpperCAmelCase ( ) -> Tuple:
    """Training entry point: parse args, load data, build model, train/evaluate.

    NOTE(review): this function is machine-mangled and cannot run as written.
    Every local — including the 3-way tuple unpacks — is bound to the single
    name ``A_``, while later statements read names that are never bound here
    (``parser``, ``model_args``, ``data_args``, ``training_args``,
    ``last_checkpoint``, ``dataset``, ``split``, ``metric``, ``config``,
    ``model``, ``image_processor``, ``size``, ``normalize``,
    ``_train_transforms``, ``_val_transforms``, ``trainer``, ``train_result``,
    ``p`` inside ``compute_metrics``, and the ``kwargs`` dict at the end), so
    the first such read raises NameError. Many call arguments were likewise
    collapsed onto ``_UpperCamelCase``. The original bindings must be restored
    before this can run; the comments below document intended flow only.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    A_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        A_ ,A_ ,A_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        A_ ,A_ ,A_ = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_image_classification''', _UpperCamelCase, _UpperCamelCase )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    A_ = training_args.get_process_log_level()
    logger.setLevel(_UpperCamelCase )
    transformers.utils.logging.set_verbosity(_UpperCamelCase )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
    logger.info(F'''Training/evaluation parameters {training_args}''' )
    # Detecting last checkpoint.
    A_ = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        A_ = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        A_ = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task='''image-classification''', use_auth_token=True if model_args.use_auth_token else None, )
    else:
        A_ = {}
        if data_args.train_dir is not None:
            A_ = os.path.join(data_args.train_dir, '''**''' )
        if data_args.validation_dir is not None:
            A_ = os.path.join(data_args.validation_dir, '''**''' )
        A_ = load_dataset(
            '''imagefolder''', data_files=_UpperCamelCase, cache_dir=model_args.cache_dir, task='''image-classification''', )
    # If we don't have a validation split, split off a percentage of train as validation.
    A_ = None if '''validation''' in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, _UpperCamelCase ) and data_args.train_val_split > 0.0:
        A_ = dataset['''train'''].train_test_split(data_args.train_val_split )
        A_ = split['''train''']
        A_ = split['''test''']
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    A_ = dataset['''train'''].features['''labels'''].names
    A_ ,A_ = {}, {}
    for i, label in enumerate(_UpperCamelCase ):
        A_ = str(_UpperCamelCase )
        A_ = label
    # Load the accuracy metric from the datasets package
    A_ = evaluate.load('''accuracy''' )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(_UpperCamelCase : Optional[Any] ):
        # NOTE(review): reads `p`, which is never bound — the parameter name
        # was mangled; it should be the EvalPrediction argument itself.
        return metric.compute(predictions=np.argmax(p.predictions, axis=1 ), references=p.label_ids )
    A_ = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(_UpperCamelCase ), labelaid=_UpperCamelCase, idalabel=_UpperCamelCase, finetuning_task='''image-classification''', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    A_ = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=_UpperCamelCase, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
    A_ = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        A_ = image_processor.size['''shortest_edge''']
    else:
        A_ = (image_processor.size['''height'''], image_processor.size['''width'''])
    A_ = Normalize(mean=image_processor.image_mean, std=image_processor.image_std )
    A_ = Compose(
        [
            RandomResizedCrop(_UpperCamelCase ),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ] )
    A_ = Compose(
        [
            Resize(_UpperCamelCase ),
            CenterCrop(_UpperCamelCase ),
            ToTensor(),
            normalize,
        ] )
    def train_transforms(_UpperCamelCase : Dict ):
        # NOTE(review): reads `example_batch`, never bound — mangled parameter name.
        A_ = [
            _train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
        ]
        return example_batch
    def val_transforms(_UpperCamelCase : Any ):
        # NOTE(review): same mangling as train_transforms above.
        A_ = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
        return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError('''--do_train requires a train dataset''' )
        if data_args.max_train_samples is not None:
            A_ = (
                dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        dataset["train"].set_transform(_UpperCamelCase )
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError('''--do_eval requires a validation dataset''' )
        if data_args.max_eval_samples is not None:
            A_ = (
                dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        dataset["validation"].set_transform(_UpperCamelCase )
    # Initalize our trainer
    A_ = Trainer(
        model=_UpperCamelCase, args=_UpperCamelCase, train_dataset=dataset['''train'''] if training_args.do_train else None, eval_dataset=dataset['''validation'''] if training_args.do_eval else None, compute_metrics=_UpperCamelCase, tokenizer=_UpperCamelCase, data_collator=_UpperCamelCase, )
    # Training
    if training_args.do_train:
        A_ = None
        if training_args.resume_from_checkpoint is not None:
            A_ = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            A_ = last_checkpoint
        A_ = trainer.train(resume_from_checkpoint=_UpperCamelCase )
        trainer.save_model()
        trainer.log_metrics('''train''', train_result.metrics )
        trainer.save_metrics('''train''', train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        A_ = trainer.evaluate()
        trainer.log_metrics('''eval''', _UpperCamelCase )
        trainer.save_metrics('''eval''', _UpperCamelCase )
    # Write model card and (optionally) push to hub
    A_ = {
        '''finetuned_from''': model_args.model_name_or_path,
        '''tasks''': '''image-classification''',
        '''dataset''': data_args.dataset_name,
        '''tags''': ['''image-classification''', '''vision'''],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**_UpperCamelCase )
    else:
        trainer.create_model_card(**_UpperCamelCase )
if __name__ == "__main__":
    # BUG FIX: the module defines no `main`; the training entry point defined
    # above is currently bound to `_UpperCAmelCase`, so invoke that instead of
    # the unbound name `main`.
    _UpperCAmelCase()
| 18 | 1 |
from __future__ import annotations
import math
class SegmentTree:
    """Max segment tree with lazy propagation over 1-indexed positions 1..size.

    NOTE(review): reconstructed from a mangled original in which every method
    shared one name and declared duplicate parameters (a SyntaxError), so the
    internal calls (``self.build``, ``self.left``, ``self.query``, ...) could
    never resolve. Restored names follow those internal calls and the driver
    code below, which instantiates ``SegmentTree``.
    """

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        """Index of the left child of node *idx*."""
        return idx * 2

    def right(self, idx: int) -> int:
        """Index of the right child of node *idx*."""
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list) -> None:
        """Build the subtree at *idx* over a[left_element-1 .. right_element-1]."""
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign *val* to every position in [a, b]; always returns True."""
        if self.flag[idx] is True:
            # Push the pending assignment down before touching this node.
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            # Total overlap: record the assignment here and defer to children.
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int):
        """Maximum over positions [a, b]; -inf when the node has no overlap."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        # Render the current leaf values via point queries.
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    # NOTE(review): restored distinct names — the original bound the array,
    # the size, and the tree all to `lowercase` (each shadowing the last) and
    # then read the unbound names `A`, `size`, and `segt`.
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
| 178 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
# Module-level logger for this file.
lowercase = logging.getLogger(__name__)
@dataclass
class UpperCamelCase_(TrainingArguments):
    """
    Seq2seq-specific training arguments extending the base ``TrainingArguments``.

    NOTE(review): the original subclassed the undefined name ``snake_case_``
    (restored here to ``TrainingArguments``, which is imported above) and
    bound every field to the single unannotated name ``lowerCAmelCase``, so
    none of them registered as dataclass fields. Field names below are
    inferred from the ``help`` metadata — confirm against the upstream
    seq2seq example.
    """

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={'help': 'The label smoothing epsilon to apply (if not zero).'}
    )
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to SortishSamler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'}
    )
    adafactor: bool = field(default=False, metadata={'help': 'whether to use adafactor'})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'}
    )
    dropout: Optional[float] = field(default=None, metadata={'help': 'Dropout probability. Goes into model.config.'})
    attention_dropout: Optional[float] = field(
        default=None, metadata={'help': 'Attention dropout probability. Goes into model.config.'}
    )
    lr_scheduler: Optional[str] = field(
        default='linear',
        metadata={'help': f'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'},
    )
| 178 | 1 |
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str) -> bool:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = [int(_lowerCamelCase) for i in ip_va_address.split(".") if i.isdigit()]
return len(_lowerCamelCase) == 4 and all(0 <= int(_lowerCamelCase) <= 254 for octet in octets)
if __name__ == "__main__":
    # NOTE(review): restored working names — the original bound both locals to
    # `lowercase` (the second shadowing the first) and then read the unbound
    # names `ip` / `valid_or_invalid`, and called the undefined
    # `is_ip_va_address_valid`; the validator above is `_SCREAMING_SNAKE_CASE`.
    ip = input().strip()
    valid_or_invalid = "valid" if _SCREAMING_SNAKE_CASE(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
# Module-level logger for this file.
lowercase : str = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowerCamelCase__(Pipeline):
    """Zero-shot image classification pipeline: score candidate text labels
    against an image with a CLIP-style dual-encoder model.

    NOTE(review): restored from a mangled original — the decorator argument
    and base class were the undefined name ``__lowercase`` (restored to
    ``PIPELINE_INIT_ARGS`` / ``Pipeline``, both imported above), several
    methods declared duplicate parameters named ``a`` (a SyntaxError), and the
    four pipeline hooks all shared one private name, so the base ``Pipeline``
    dispatch could never find ``preprocess``/``_forward``/``postprocess``.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        # Validate against the mapping that matches the active framework.
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        """Classify *images* against the `candidate_labels` keyword argument."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # Route user kwargs to the preprocess step; forward/postprocess take none.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        inputs = self.image_processor(images=[load_image(image)], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        # BUG FIX: the template must be filled with each label, not the image
        # argument as the mangled original did.
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        # NOTE(review): `padding=True` restored per the upstream pipeline — confirm.
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f'Unsupported framework: {self.framework}')
        # BUG FIX: the sort key was `lambda a: -x[0]` (NameError); sort by
        # descending score.
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
def lowerCamelCase_ ( UpperCamelCase__ : list ) -> list:
    """Return every permutation of *UpperCamelCase__* as a list of tuples
    (Heap's algorithm).

    BUG FIX: the recursive step's "swap" assigned both element values to a
    single throwaway local (`x, x = a, b`), so the input list was never
    mutated and the function returned n! copies of the same tuple. The swap
    now mutates the list in place as Heap's algorithm requires.
    """
    if len(UpperCamelCase__ ) <= 1:
        return [tuple(UpperCamelCase__ )]
    res = []

    def generate(k: int, arr: list) -> None:
        # Emit a snapshot once the working prefix shrinks to one element.
        if k == 1:
            res.append(tuple(arr[:] ) )
            return
        generate(k - 1, arr )
        for i in range(k - 1 ):
            if k % 2 == 0:  # k is even
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr )

    generate(len(UpperCamelCase__ ), UpperCamelCase__ )
    return res
if __name__ == "__main__":
    # NOTE(review): restored distinct names — the original bound both values
    # to `__A` and then read the unbound names `user_input`, `arr`, and
    # `heaps`; the permutation function above is `lowerCamelCase_`.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(lowerCamelCase_(arr))
| 90 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase :
    """Helper that builds FocalNet configs and dummy inputs for the test suite below.

    NOTE(review): this block is machine-mangled and does not even parse —
    ``__init__`` and every ``create_and_check_*`` method declare the same
    parameter name (``lowerCamelCase__``) multiple times, which is a
    SyntaxError; every attribute/local assignment targets the throwaway name
    ``__lowerCamelCase`` while later lines read attributes and locals that are
    never bound (``self.batch_size``, ``pixel_values``, ``labels``, ``config``,
    ``model``, ``result``, ...); and all methods share the single name
    ``lowercase_``, so only the last definition would survive. This appears to
    be the upstream ``FocalNetModelTester`` — restore its names before use;
    the code below is preserved verbatim as evidence.
    """

    def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=32 , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=16 , lowerCamelCase__=[32, 64, 128] , lowerCamelCase__=[1, 2, 1] , lowerCamelCase__=[2, 2, 4] , lowerCamelCase__=2 , lowerCamelCase__=2.0 , lowerCamelCase__=True , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.1 , lowerCamelCase__="gelu" , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=0.02 , lowerCamelCase__=1e-5 , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__=10 , lowerCamelCase__=8 , lowerCamelCase__=["stage1", "stage2"] , lowerCamelCase__=[1, 2] , ) -> int:
        '''simple docstring'''
        __lowerCamelCase = parent
        __lowerCamelCase = batch_size
        __lowerCamelCase = image_size
        __lowerCamelCase = patch_size
        __lowerCamelCase = num_channels
        __lowerCamelCase = embed_dim
        __lowerCamelCase = hidden_sizes
        __lowerCamelCase = depths
        __lowerCamelCase = num_heads
        __lowerCamelCase = window_size
        __lowerCamelCase = mlp_ratio
        __lowerCamelCase = qkv_bias
        __lowerCamelCase = hidden_dropout_prob
        __lowerCamelCase = attention_probs_dropout_prob
        __lowerCamelCase = drop_path_rate
        __lowerCamelCase = hidden_act
        __lowerCamelCase = use_absolute_embeddings
        __lowerCamelCase = patch_norm
        __lowerCamelCase = layer_norm_eps
        __lowerCamelCase = initializer_range
        __lowerCamelCase = is_training
        __lowerCamelCase = scope
        __lowerCamelCase = use_labels
        __lowerCamelCase = type_sequence_label_size
        __lowerCamelCase = encoder_stride
        __lowerCamelCase = out_features
        __lowerCamelCase = out_indices

    # Builds (config, pixel_values, labels) for a test case.
    def lowercase_ ( self ) -> List[Any]:
        '''simple docstring'''
        __lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowerCamelCase = None
        if self.use_labels:
            __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __lowerCamelCase = self.get_config()
        return config, pixel_values, labels

    # Builds a FocalNetConfig from the tester's attributes.
    def lowercase_ ( self ) -> List[str]:
        '''simple docstring'''
        return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )

    # Checks base-model forward output shape.
    def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
        '''simple docstring'''
        __lowerCamelCase = FocalNetModel(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        __lowerCamelCase = model(lowerCamelCase__ )
        __lowerCamelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        __lowerCamelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )

    # Checks the backbone's feature maps and channels, with and without out_features.
    def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
        '''simple docstring'''
        __lowerCamelCase = FocalNetBackbone(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        __lowerCamelCase = model(lowerCamelCase__ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
        # verify backbone works with out_features=None
        __lowerCamelCase = None
        __lowerCamelCase = FocalNetBackbone(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        __lowerCamelCase = model(lowerCamelCase__ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    # Checks masked-image-modeling reconstruction shapes (RGB and greyscale).
    def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
        '''simple docstring'''
        __lowerCamelCase = FocalNetForMaskedImageModeling(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        __lowerCamelCase = model(lowerCamelCase__ )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        __lowerCamelCase = 1
        __lowerCamelCase = FocalNetForMaskedImageModeling(lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        __lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __lowerCamelCase = model(lowerCamelCase__ )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    # Checks classification-head logits shapes (RGB and greyscale).
    def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str:
        '''simple docstring'''
        __lowerCamelCase = self.type_sequence_label_size
        __lowerCamelCase = FocalNetForImageClassification(lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        __lowerCamelCase = model(lowerCamelCase__ , labels=lowerCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __lowerCamelCase = 1
        __lowerCamelCase = FocalNetForImageClassification(lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        __lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __lowerCamelCase = model(lowerCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    # Adapter used by the common test mixins: (config, {"pixel_values": ...}).
    def lowercase_ ( self ) -> Tuple:
        '''simple docstring'''
        __lowerCamelCase = self.prepare_config_and_inputs()
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
        __lowerCamelCase = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
    """Standard HF model-test suite wiring FocalNet into the common mixins.

    NOTE(review): machine-mangled — the bases are the undefined name
    ``__magic_name__`` (the imports above suggest ``ModelTesterMixin`` /
    ``PipelineTesterMixin``); every class attribute is bound to the single
    name ``snake_case_`` so only the last survives; every method is named
    ``lowercase_`` (unittest would keep only the final definition);
    ``lowercase_`` at the hidden-states check declares duplicate
    ``lowerCamelCase__`` parameters, which is a SyntaxError; and method
    bodies read names that are never bound (``model``, ``config``,
    ``inputs_dict``, ``hidden_states``, ``image_size``, ``patch_size``, ...)
    as well as the undefined ``FocalNetModelTester``. Restore the upstream
    test names before running; the code below is preserved verbatim.
    """
    snake_case_ = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    snake_case_ = (
        {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False

    def lowercase_ ( self ) -> str:
        '''simple docstring'''
        __lowerCamelCase = FocalNetModelTester(self )
        __lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , embed_dim=37 , has_text_modality=lowerCamelCase__ )

    def lowercase_ ( self ) -> str:
        '''simple docstring'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def lowercase_ ( self ) -> str:
        '''simple docstring'''
        return

    def lowercase_ ( self ) -> List[Any]:
        '''simple docstring'''
        __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase__ )

    def lowercase_ ( self ) -> Optional[Any]:
        '''simple docstring'''
        __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*lowerCamelCase__ )

    def lowercase_ ( self ) -> str:
        '''simple docstring'''
        __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase__ )

    def lowercase_ ( self ) -> Any:
        '''simple docstring'''
        __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )

    @unittest.skip(reason='FocalNet does not use inputs_embeds' )
    def lowercase_ ( self ) -> Optional[int]:
        '''simple docstring'''
        pass

    @unittest.skip(reason='FocalNet does not use feedforward chunking' )
    def lowercase_ ( self ) -> Union[str, Any]:
        '''simple docstring'''
        pass

    def lowercase_ ( self ) -> Dict:
        '''simple docstring'''
        __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            __lowerCamelCase = model_class(lowerCamelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            __lowerCamelCase = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )

    def lowercase_ ( self ) -> str:
        '''simple docstring'''
        __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            __lowerCamelCase = model_class(lowerCamelCase__ )
            __lowerCamelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowerCamelCase = [*signature.parameters.keys()]
            __lowerCamelCase = ['pixel_values']
            self.assertListEqual(arg_names[:1] , lowerCamelCase__ )

    # NOTE(review): SyntaxError — duplicate `lowerCamelCase__` parameters.
    def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
        '''simple docstring'''
        __lowerCamelCase = model_class(lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        with torch.no_grad():
            __lowerCamelCase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
        __lowerCamelCase = outputs.hidden_states
        __lowerCamelCase = getattr(
            self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
        # FocalNet has a different seq_length
        __lowerCamelCase = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        __lowerCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        __lowerCamelCase = outputs.reshaped_hidden_states
        self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = reshaped_hidden_states[0].shape
        __lowerCamelCase = (
            reshaped_hidden_states[0].view(lowerCamelCase__ , lowerCamelCase__ , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )

    def lowercase_ ( self ) -> Dict:
        '''simple docstring'''
        __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        __lowerCamelCase = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            __lowerCamelCase = True
            self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __lowerCamelCase = True
            self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )

    def lowercase_ ( self ) -> Optional[Any]:
        '''simple docstring'''
        __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        __lowerCamelCase = 3
        __lowerCamelCase = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        __lowerCamelCase = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        __lowerCamelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        __lowerCamelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            __lowerCamelCase = True
            self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __lowerCamelCase = True
            self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , (padded_height, padded_width) )

    @slow
    def lowercase_ ( self ) -> str:
        '''simple docstring'''
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase = FocalNetModel.from_pretrained(lowerCamelCase__ )
            self.assertIsNotNone(lowerCamelCase__ )

    def lowercase_ ( self ) -> Optional[int]:
        '''simple docstring'''
        __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        __lowerCamelCase = _config_zero_init(lowerCamelCase__ )
        for model_class in self.all_model_classes:
            __lowerCamelCase = model_class(config=lowerCamelCase__ )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class __lowerCAmelCase(unittest.TestCase):
    """Integration test: run a real FocalNet classification checkpoint on a COCO image.

    NOTE(review): uses `torch_device`, presumably imported from
    transformers.testing_utils at the top of the original module — confirm.
    """

    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        # Bug fix: original used assertTrue(value, 281), which treats 281 as a
        # message and passes for any truthy value. assertEqual checks the class id.
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class __lowerCAmelCase(__magic_name__, unittest.TestCase):
    """Backbone test suite for FocalNet.

    NOTE(review): the mixin base `__magic_name__` is presumably
    BackboneTesterMixin imported earlier — confirm. The mixin reads the three
    class attributes below; the obfuscated version assigned all three to the
    same name, so only the last survived.
    """

    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False

    def setUp(self):
        # Obfuscated version discarded the tester into a local variable.
        self.model_tester = FocalNetModelTester(self)
| 90 | 1 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    """Min-heap over vertex distances, with a vertex -> heap-index map.

    `heap` / `positions` are passed in by the caller: `heap[i]` is the key
    (distance) at heap slot i and `positions[i]` is the vertex stored there.
    `self.node_position[v]` is the heap slot currently holding vertex v.

    Restored from the obfuscated version, where every method shared one name
    (so only the last definition survived) and all swap/assignment results
    were bound to throwaway locals.
    """

    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        """Return the heap slot of `vertex`."""
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        """Record that `vertex` now lives at heap slot `pos`."""
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at `start` down until the heap property holds."""
        if start > size // 2 - 1:
            return
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp_pos = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp_pos
            # Keep the vertex -> slot map in sync with the swap above.
            temp_index = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp_index)
            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift the value `val` at `index` up to its proper slot."""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            # Reached the root without finding a smaller parent.
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        """Build a min-heap in place from an arbitrary array."""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """Pop and return the vertex with the smallest key."""
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


# Backward-compatible alias for the obfuscated class name.
A_ = Heap
def __a(adjacency_list):
    """Prim's MST algorithm over `adjacency_list[v] = [[neighbor, weight], ...]`.

    Returns the list of tree edges as (parent_vertex, vertex) tuples.
    Restored bindings: the obfuscated version discarded every assignment.
    """
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


# Descriptive alias; the __main__ block calls the algorithm by this name.
prisms_algorithm = __a
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    # Read `edges_number` undirected weighted edges ("u v weight" per line)
    # and print the minimum spanning tree edges.
    edges_number = int(input('Enter number of edges: ').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    # Bug fix: original called undefined `prisms_algorithm`; the function in
    # this module is `__a`.
    print(__a(adjacency_list))
| 23 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A_(unittest.TestCase):
    """Tests for BarkProcessor (tokenizer + speaker-embedding handling).

    Restored: setUp previously bound everything to locals, so the `self.*`
    attributes the tests read were never set; test methods all shared one name.
    """

    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 23 | 1 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_(SchedulerCommonTest):
    """Scheduler tests for UnCLIPScheduler.

    Restored: the base class was the undefined name `UpperCamelCase`
    (SchedulerCommonTest is imported above); test methods all shared one
    name; local bindings were discarded.
    """

    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)
        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(25)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    # The two no-op overrides below skip base-class tests that do not apply to
    # UnCLIP. NOTE(review): their original names are not recoverable from this
    # dump — confirm against upstream before relying on exact discovery names.
    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 283 |
def lowercase_(SCREAMING_SNAKE_CASE_):
    """Sort a list in place with cocktail shaker sort and return it.

    Bug fix: the obfuscated version bound the swap results to throwaway
    locals, so the list was never modified.

    >>> lowercase_([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> lowercase_([])
    []
    """
    unsorted = SCREAMING_SNAKE_CASE_
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # Backward pass: bubble the minimum of unsorted[0:i+1] to the left.
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        # Forward pass: bubble the maximum to the right.
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Bug fix: original assigned the parsed input to a throwaway name and then
    # referenced undefined `user_input` / `cocktail_shaker_sort`.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(f'{lowercase_(unsorted) = }')
| 283 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Restored: the obfuscated version assigned every constant to the same name
# (`_lowerCamelCase`); the tokenizer class below references these real names.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

# End-of-word marker emitted by BPE and the "@@ " continuation marker.
BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a str or symbol tuple).

    Bug fix: the obfuscated version never bound `pairs`/`prev_char`, so the
    result set was empty/undefined; callers in this module use `get_pairs`.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Backward-compatible alias for the obfuscated name.
__lowerCamelCase = get_pairs
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class lowercase(PreTrainedTokenizer):
    """Speech2Text2 BPE tokenizer (decode-only when no merges file is given).

    Restored: the base class was the undefined name `a` (PreTrainedTokenizer
    is imported above), every method shared one name, and state assignments
    were discarded.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, do_lower_case=do_lower_case, **kwargs
        )
        self.do_lower_case = do_lower_case
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            # Keep only the first two symbols of each merge line.
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply BPE merges to a single whitespace token and return it split with `@@ `."""
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")
        word = word.replace(" ", BPE_TOKEN_VOCAB)
        # Bug fix: the obfuscated version discarded the cache write.
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            # Bug fix: obfuscated key lambda referenced undefined `kv`.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return (vocab_file, merges_file)
| 364 | import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
# Bind to `logger`, which the helpers below reference (obfuscation had bound it
# to a throwaway name). Keep the old name as an alias for compatibility.
_lowerCamelCase = logger = logging.get_logger(__name__)
class ParallelBackendConfig:
    """Module-level holder for the joblib backend selected via `parallel_backend`."""

    # None means: use the default multiprocessing pool.
    backend_name = None


# Backward-compatible alias for the obfuscated class name.
lowercase = ParallelBackendConfig
@experimental
def __lowerCamelCase(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Dispatch a parallel map to multiprocessing or joblib per ParallelBackendConfig."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


# Descriptive alias; the two helpers are named below.
parallel_map = __lowerCamelCase
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Map `single_map_nested_func` over contiguous slices of `iterable` with a Pool.

    Restored bindings: the obfuscated version discarded num_proc/split/start/end.
    """
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))
    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )
    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        # Share tqdm's lock so per-process bars don't clobber each other.
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Run the map through joblib using the configured backend."""
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """Context manager selecting a joblib backend for parallel maps.

    Bug fix: the obfuscated version bound the backend name (and the reset to
    None) to throwaway locals, so ParallelBackendConfig was never updated.
    """
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
| 206 | 0 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

# Restored constant names; the tokenizer class below references them.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file):
    """Read a vocab file (one token per line) and return the stripped tokens.

    Bug fix: the obfuscated version bound the file contents to a throwaway
    name and then referenced undefined `lines`; the class below calls this
    as `load_vocab_file`.
    """
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


# Backward-compatible alias for the obfuscated name.
a__ = load_vocab_file
class __UpperCAmelCase(PreTrainedTokenizer):
    """ESM protein-sequence tokenizer (whitespace split over a fixed vocab).

    Restored: the base class was the undefined name `lowerCamelCase__`
    (PreTrainedTokenizer is imported above); all methods shared one name and
    every `self.*` assignment was discarded.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index):
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token):
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token):
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index):
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self):
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False):
        # ESM vocab tokens are all "special" so the trie treats them atomically.
        return super()._add_tokens(new_tokens, special_tokens=True)
| 336 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __UpperCAmelCase ( unittest.TestCase ):
    """Builds a tiny RobertaConfig plus random tensors for the Flax Roberta tests.

    NOTE(review): in the original, every ``__init__`` parameter was named
    ``__A`` (a duplicate-argument SyntaxError), nothing was bound to ``self``,
    and all three preparation methods shared the name ``__magic_name__`` while
    calling ``self.prepare_config_and_inputs()``. Names below are reconstructed
    from those in-body references.
    """

    def __init__(
        self,
        parent,
        batch_size=1_3,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=9_9,
        hidden_size=3_2,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=1_6,
        type_sequence_label_size=2,
        initializer_range=0.0_2,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Random ids/masks plus a tiny (non-decoder) RobertaConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            # NOTE(review): the original passed a mangled value here; the
            # decoder variant is configured in prepare_config_and_inputs_for_decoder.
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Shape the inputs as the dict expected by FlaxModelTesterMixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Same inputs, plus encoder states/mask for cross-attention tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class __UpperCAmelCase ( FlaxModelTesterMixin , unittest.TestCase ):
    """Model-level tests for the Flax Roberta family, driven by FlaxModelTesterMixin.

    NOTE(review): the original base class and the attribute/method names were
    mangled (`lowerCamelCase__`, `UpperCamelCase`, `__magic_name__`); they are
    restored here from the mixin's expected interface and the method bodies.
    """

    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        # NOTE(review): `FlaxRobertaModelTester` is not defined under that name
        # in this file — the tester class above appears to be it; confirm the
        # binding before relying on this test.
        self.model_tester = FlaxRobertaModelTester(self )

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test loading every model class from the PyTorch checkpoint."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''roberta-base''', from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 336 | 1 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase ) -> list:
    """Return the first n terms of the harmonic series as strings.

    Args:
        _lowerCamelCase: the number of terms n (an int or a numeric string);
            the empty string means "no terms requested".

    Returns:
        ``["1", "1/2", ..., "1/n"]`` (empty list for n == 0 or "").
    """
    # Bug fix: the original tested an undefined name `n_term` instead of the
    # parameter, raising NameError on every call.
    if _lowerCamelCase == "":
        return []
    series: list = []
    for temp in range(int(_lowerCamelCase ) ):
        # The first term is rendered as "1", every later term as "1/k".
        series.append(F"""1/{temp + 1}""" if series else "1" )
    return series
if __name__ == "__main__":
    # Read n from the user and print the first n harmonic-series terms.
    _lowerCAmelCase : Tuple = input('''Enter the last number (nth term) of the Harmonic Series''')
    print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
    # Bug fix: the original called undefined names `harmonic_series` /
    # `nth_term`; the series function defined in this file is `lowerCamelCase_`.
    print(lowerCamelCase_(_lowerCAmelCase))
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def lowerCamelCase_( _lowerCamelCase ) -> dict:
    """Tokenize one dataset row and record its characters-per-token ratio.

    Args:
        _lowerCamelCase: a dataset row (mapping) with a ``"content"`` text field.

    Returns:
        dict with ``"input_ids"`` and ``"ratio_char_token"``.
    """
    # Bug fix: the original assigned every intermediate to `_lowerCamelCase`
    # and then returned an undefined name `output`.
    output = {}
    # NOTE(review): the original passed a mangled value as `truncation`;
    # pretokenization normally keeps full sequences — confirm `False` is intended.
    output["input_ids"] = tokenizer(_lowerCamelCase["content"] , truncation=False )["input_ids"]
    # Characters per produced token: a rough tokenizer-efficiency metric.
    output["ratio_char_token"] = len(_lowerCamelCase["content"] ) / len(output["input_ids"] )
    return output
# Parse CLI arguments, tokenize the dataset in parallel, and push the result
# to the Hugging Face Hub.
# Bug fix: the original assigned everything to `_lowerCAmelCase` while
# referencing undefined names (`args`, `t_start`, `ds`, `tokenize`); the
# coherent names those references imply are restored below.
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    # Default to one worker per CPU core.
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds = ds.map(
    lowerCamelCase_,  # the tokenization function defined above
    num_proc=args.num_workers,
    remove_columns=[
        '''repo_name''',
        '''path''',
        '''copies''',
        '''size''',
        '''content''',
        '''license''',
        '''hash''',
        '''line_mean''',
        '''line_max''',
        '''alpha_frac''',
        '''autogenerated''',
    ],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
# NOTE(review): identifiers in this class look machine-mangled — the base class
# `_a`, the repeated `lowerCamelCase__` class attributes, every method named
# `_UpperCAmelCase`, the duplicate `a` parameters (a SyntaxError), and bodies
# that assign to `lowercase__` yet return other names. Structurally this is a
# fast-test suite for the UnCLIP image-variation pipeline; confirm all names
# against the original file before relying on them. Code is left byte-identical.
class UpperCAmelCase_ ( _a , unittest.TestCase):
    # Pipeline under test plus the parameter sets exercised by the tester mixin.
    lowerCamelCase__ : int = UnCLIPImageVariationPipeline
    lowerCamelCase__ : int = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    lowerCamelCase__ : Any = IMAGE_VARIATION_BATCH_PARAMS
    # Optional call parameters the mixin must always pass through.
    lowerCamelCase__ : Dict = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    lowerCamelCase__ : Dict = False

    # --- tiny hyper-parameters shared by the dummy sub-models below ---
    @property
    def _UpperCAmelCase ( self ) -> List[Any]:
        return 3_2

    @property
    def _UpperCAmelCase ( self ) -> Optional[int]:
        return 3_2

    @property
    def _UpperCAmelCase ( self ) -> Union[str, Any]:
        return self.time_input_dim

    @property
    def _UpperCAmelCase ( self ) -> Optional[Any]:
        return self.time_input_dim * 4

    @property
    def _UpperCAmelCase ( self ) -> List[Any]:
        return 1_0_0

    # Tiny CLIP tokenizer fixture.
    @property
    def _UpperCAmelCase ( self ) -> Union[str, Any]:
        lowercase__ : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer

    # Seeded tiny CLIP text encoder (deterministic weights via manual_seed).
    @property
    def _UpperCAmelCase ( self ) -> Optional[int]:
        torch.manual_seed(0 )
        lowercase__ : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        return CLIPTextModelWithProjection(a )

    # Seeded tiny CLIP vision encoder.
    @property
    def _UpperCAmelCase ( self ) -> List[Any]:
        torch.manual_seed(0 )
        lowercase__ : List[Any] = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=3_2 , intermediate_size=3_7 , patch_size=1 , )
        return CLIPVisionModelWithProjection(a )

    # Seeded text-projection model mapping CLIP embeddings into the decoder.
    @property
    def _UpperCAmelCase ( self ) -> Union[str, Any]:
        torch.manual_seed(0 )
        lowercase__ : Dict = {
            'clip_embeddings_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'cross_attention_dim': self.cross_attention_dim,
        }
        lowercase__ : Dict = UnCLIPTextProjModel(**a )
        return model

    # Seeded decoder UNet.
    @property
    def _UpperCAmelCase ( self ) -> str:
        torch.manual_seed(0 )
        lowercase__ : Union[str, Any] = {
            'sample_size': 3_2,
            # RGB in channels
            'in_channels': 3,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 6,
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': 'identity',
        }
        lowercase__ : Optional[int] = UNetaDConditionModel(**a )
        return model

    # Shared kwargs for the two super-resolution UNets.
    @property
    def _UpperCAmelCase ( self ) -> str:
        return {
            "sample_size": 6_4,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def _UpperCAmelCase ( self ) -> Any:
        torch.manual_seed(0 )
        lowercase__ : Dict = UNetaDModel(**self.dummy_super_res_kwargs )
        return model

    @property
    def _UpperCAmelCase ( self ) -> Tuple:
        # seeded differently to get different unet than `self.dummy_super_res_first`
        torch.manual_seed(1 )
        lowercase__ : Tuple = UNetaDModel(**self.dummy_super_res_kwargs )
        return model

    # Assembles all dummy sub-models into the pipeline's component dict.
    def _UpperCAmelCase ( self ) -> str:
        lowercase__ : Union[str, Any] = self.dummy_decoder
        lowercase__ : Dict = self.dummy_text_proj
        lowercase__ : List[Any] = self.dummy_text_encoder
        lowercase__ : Tuple = self.dummy_tokenizer
        lowercase__ : List[str] = self.dummy_super_res_first
        lowercase__ : List[Any] = self.dummy_super_res_last
        lowercase__ : Dict = UnCLIPScheduler(
            variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_0_0_0 , )
        lowercase__ : Tuple = UnCLIPScheduler(
            variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_0_0_0 , )
        lowercase__ : Optional[Any] = CLIPImageProcessor(crop_size=3_2 , size=3_2 )
        lowercase__ : Any = self.dummy_image_encoder
        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    # Builds the standard call kwargs; `pil_image` toggles tensor vs PIL input.
    # NOTE(review): the duplicate `a` parameters below are a SyntaxError in the
    # original — presumably (device, seed=0, pil_image=True); confirm.
    def _UpperCAmelCase ( self , a , a=0 , a=True ) -> Union[str, Any]:
        lowercase__ : Union[str, Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(a ) ).to(a )
        if str(a ).startswith('mps' ):
            lowercase__ : List[Any] = torch.manual_seed(a )
        else:
            lowercase__ : Any = torch.Generator(device=a ).manual_seed(a )
        if pil_image:
            lowercase__ : Dict = input_image * 0.5 + 0.5
            lowercase__ : Tuple = input_image.clamp(0 , 1 )
            lowercase__ : Optional[Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            lowercase__ : List[str] = DiffusionPipeline.numpy_to_pil(a )[0]
        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }

    # End-to-end run on CPU; compares a 3x3 output slice against hard-coded values.
    def _UpperCAmelCase ( self ) -> Optional[Any]:
        lowercase__ : Optional[int] = 'cpu'
        lowercase__ : List[str] = self.get_dummy_components()
        lowercase__ : Any = self.pipeline_class(**a )
        lowercase__ : Optional[Any] = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        lowercase__ : Union[str, Any] = self.get_dummy_inputs(a , pil_image=a )
        lowercase__ : Optional[int] = pipe(**a )
        lowercase__ : int = output.images
        lowercase__ : Tuple = self.get_dummy_inputs(a , pil_image=a )
        lowercase__ : Union[str, Any] = pipe(
            **a , return_dict=a , )[0]
        lowercase__ : Dict = image[0, -3:, -3:, -1]
        lowercase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        lowercase__ : Optional[int] = np.array(
            [
                0.9_997,
                0.0_002,
                0.9_997,
                0.9_997,
                0.9_969,
                0.0_023,
                0.9_997,
                0.9_969,
                0.9_970,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    # Same pipeline run as above with a different expected slice (PIL input path,
    # presumably — the mangled get_dummy_inputs flag obscures which; confirm).
    def _UpperCAmelCase ( self ) -> str:
        lowercase__ : Any = 'cpu'
        lowercase__ : Optional[int] = self.get_dummy_components()
        lowercase__ : Union[str, Any] = self.pipeline_class(**a )
        lowercase__ : Union[str, Any] = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        lowercase__ : List[str] = self.get_dummy_inputs(a , pil_image=a )
        lowercase__ : List[Any] = pipe(**a )
        lowercase__ : List[str] = output.images
        lowercase__ : Any = self.get_dummy_inputs(a , pil_image=a )
        lowercase__ : Tuple = pipe(
            **a , return_dict=a , )[0]
        lowercase__ : str = image[0, -3:, -3:, -1]
        lowercase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        lowercase__ : Any = np.array([0.9_997, 0.0_003, 0.9_997, 0.9_997, 0.9_970, 0.0_024, 0.9_997, 0.9_971, 0.9_971] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    # Batch of two images in, batch of two images out.
    def _UpperCAmelCase ( self ) -> Union[str, Any]:
        lowercase__ : Union[str, Any] = 'cpu'
        lowercase__ : Tuple = self.get_dummy_components()
        lowercase__ : List[str] = self.pipeline_class(**a )
        lowercase__ : Tuple = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        lowercase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
        lowercase__ : Optional[int] = [
            pipeline_inputs['image'],
            pipeline_inputs['image'],
        ]
        lowercase__ : Any = pipe(**a )
        lowercase__ : List[str] = output.images
        lowercase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
        lowercase__ : Optional[Any] = [
            tuple_pipeline_inputs['image'],
            tuple_pipeline_inputs['image'],
        ]
        lowercase__ : List[str] = pipe(
            **a , return_dict=a , )[0]
        lowercase__ : Optional[int] = image[0, -3:, -3:, -1]
        lowercase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (2, 6_4, 6_4, 3)
        lowercase__ : Tuple = np.array(
            [
                0.9_997,
                0.9_989,
                0.0_008,
                0.0_021,
                0.9_960,
                0.0_018,
                0.0_014,
                0.0_002,
                0.9_933,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    # Verifies that passing precomputed image embeddings gives the same output
    # as passing the image itself (latents pinned via a dummy scheduler).
    def _UpperCAmelCase ( self ) -> List[str]:
        lowercase__ : int = torch.device('cpu' )

        class UpperCAmelCase_ :
            # presumably `init_noise_sigma = 1` on the stand-in scheduler — confirm
            lowerCamelCase__ : Union[str, Any] = 1

        lowercase__ : str = self.get_dummy_components()
        lowercase__ : str = self.pipeline_class(**a )
        lowercase__ : Any = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        lowercase__ : Optional[Any] = torch.Generator(device=a ).manual_seed(0 )
        lowercase__ : int = pipe.decoder.dtype
        lowercase__ : Optional[int] = 1
        lowercase__ : Optional[int] = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        lowercase__ : Tuple = pipe.prepare_latents(
            a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
        lowercase__ : List[Any] = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        lowercase__ : str = pipe.prepare_latents(
            a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
        lowercase__ : Dict = self.get_dummy_inputs(a , pil_image=a )
        lowercase__ : Optional[Any] = pipe(
            **a , decoder_latents=a , super_res_latents=a ).images
        lowercase__ : Union[str, Any] = self.get_dummy_inputs(a , pil_image=a )
        # Don't pass image, instead pass embedding
        lowercase__ : Optional[Any] = pipeline_inputs.pop('image' )
        lowercase__ : Dict = pipe.image_encoder(a ).image_embeds
        lowercase__ : List[str] = pipe(
            **a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images
        # make sure passing text embeddings manually is identical
        assert np.abs(img_out_a - img_out_a ).max() < 1e-4

    # Delegated mixin checks below, with MPS skipped and tolerances relaxed.
    @skip_mps
    def _UpperCAmelCase ( self ) -> str:
        lowercase__ : List[Any] = torch_device == 'cpu'
        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        lowercase__ : Optional[int] = 1e-2
        self._test_attention_slicing_forward_pass(
            test_max_difference=a , expected_max_diff=a )

    @skip_mps
    def _UpperCAmelCase ( self ) -> List[Any]:
        lowercase__ : List[Any] = torch_device == 'cpu'
        lowercase__ : List[Any] = True
        lowercase__ : List[Any] = [
            'decoder_num_inference_steps',
            'super_res_num_inference_steps',
        ]
        self._test_inference_batch_single_identical(
            test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , )

    def _UpperCAmelCase ( self ) -> List[str]:
        lowercase__ : Optional[int] = [
            'decoder_num_inference_steps',
            'super_res_num_inference_steps',
        ]
        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            lowercase__ : Union[str, Any] = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=a , additional_params_copy_to_batched_inputs=a , )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=a )

    @skip_mps
    def _UpperCAmelCase ( self ) -> Any:
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def _UpperCAmelCase ( self ) -> List[Any]:
        return super().test_save_load_local()

    @skip_mps
    def _UpperCAmelCase ( self ) -> Tuple:
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
    """GPU integration test: UnCLIP (Karlo) image variation against a reference image.

    NOTE(review): method names and several argument names were mangled in the
    original (`_UpperCAmelCase`, bare `a`); they are restored from the bodies.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
        # Bug fix: the original used `torch.floataa`, which is not a torch
        # attribute; half precision matches the fp16 reference artifact above.
        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        output = pipeline(
            input_image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (2_5_6, 2_5_6, 3)
        # Allow a small mean per-pixel difference against the reference image.
        assert_mean_pixel_difference(image , expected_image , 1_5 )
| 77 | """simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 77 | 1 |
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline  # noqa: F401


# Backward-compatibility shim: this module only re-exports
# FlaxStableDiffusionControlNetPipeline from its new home under
# `diffusers.pipelines.controlnet` and warns callers (removal slated for
# 0.22.0) to use the top-level `diffusers` import instead.
deprecate(
    'stable diffusion controlnet',
    '0.22.0',
    'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
    standard_warn=False,
    stacklevel=3,
)
| 351 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __lowerCamelCase :
    """Builds a tiny DPR config plus random tensors for the TF-DPR model tests.

    NOTE(review): in the original, every ``__init__`` parameter was named
    ``SCREAMING_SNAKE_CASE__`` (a duplicate-argument SyntaxError), nothing was
    bound to ``self``, and all methods shared the name ``a``. Names are
    reconstructed from the in-body references and the call sites in the test
    class below (``prepare_config_and_inputs``, ``create_and_check_dpr_*``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels plus a tiny DPRConfig wrapping a BertConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            # NOTE(review): the original passed a mangled value here — confirm
            # the encoder-only (non-decoder) setting is intended.
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Pooler output of the context encoder has the projected hidden size."""
        model = TFDPRContextEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Same check as the context encoder, for the question encoder."""
        model = TFDPRQuestionEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Reader produces per-token start/end logits and per-example relevance."""
        model = TFDPRReader(config=config )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )

    def prepare_config_and_inputs_for_common(self):
        """Shape the inputs as the dict expected by TFModelTesterMixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class __lowerCamelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Model-level tests for the TF-DPR encoders and reader.

    NOTE(review): the original base classes (`UpperCamelCase__`), class
    attributes and method names were mangled; in particular every test method
    was named ``a``, so unittest never discovered them and later definitions
    shadowed earlier ones. Conventional ``test_*`` names are restored from the
    method bodies.
    """

    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        # NOTE(review): `TFDPRModelTester` is not defined under that name in
        # this file — the tester class above appears to be it; confirm.
        self.model_tester = TFDPRModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPRConfig , hidden_size=37 )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs )

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs )

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test loading each architecture from its first hub checkpoint."""
        # NOTE(review): the context-encoder loop appears twice in the original;
        # both are kept to preserve behavior.
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
    """Integration test: TF-DPR question-encoder embeddings against known values.

    NOTE(review): the original assigned every intermediate to
    ``lowerCAmelCase__`` and then asserted on undefined names
    (``output``/``expected_slice``); coherent locals are restored.
    """

    @slow
    def test_embeddings(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base" )
        input_ids = tf.constant(
            [[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]] )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids )[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03_236_253,
                    0.12_753_335,
                    0.16_818_509,
                    0.00_279_786,
                    0.3_896_933,
                    0.24_264_945,
                    0.2_178_971,
                    -0.02_335_227,
                    -0.08_481_959,
                    -0.14_324_117,
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 221 | 0 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    """Builds tiny CTRL configs and random inputs for the test classes below.

    NOTE(review): restored from a corrupted automated rewrite. The previous
    version had every `__init__` parameter named `_A` (a SyntaxError), never
    assigned any attribute to `self`, named every method `UpperCAmelCase__`
    (each definition shadowing the previous one), and destroyed the tuple
    unpack in `prepare_config_and_inputs_for_common`. Names are restored to
    match the call sites in the test class (`CTRLModelTester(self)`,
    `prepare_config_and_inputs`, `create_and_check_ctrl_model`, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # CTRL uses the last vocab id as the padding token.
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, head_mask, token_type_ids,
        mc_token_ids, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        # Exercise optional-argument paths before the checked forward pass.
        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite wiring for the CTRL model family.

    NOTE(review): restored from a corrupted automated rewrite: the class
    attributes were all assigned to one name (overwriting each other),
    `setUp` bound throwaway locals instead of `self.model_tester` /
    `self.config_tester`, and every method shared the name `UpperCAmelCase__`.
    Restored names follow the upstream CTRL test module; the mixin bases are
    grounded by this file's imports.
    """

    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # NOTE: `pipeline_test_casse_name` spelling matches the upstream PipelineTesterMixin hook.
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    """Slow integration test: greedy generation with the pretrained ``ctrl`` checkpoint.

    NOTE(review): restored from corrupted local names — the previous version
    never bound `model`, `input_ids`, `expected_output_ids` or `output_ids`,
    and passed an undefined name as `do_sample`. Greedy decoding
    (``do_sample=False``) is required for the fixed expected-id comparison.
    """

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
from abc import ABC, abstractmethod  # (removed "| 303 |" dataset-export artifact that was fused onto this line)
from argparse import ArgumentParser
class __snake_case ( _lowerCamelCase ):
    """Abstract base for CLI subcommands.

    Subclasses must provide both hooks: the static registration hook that
    wires the command's arguments into an ``ArgumentParser``, and the
    instance hook that actually executes the command.
    """

    @staticmethod
    @abstractmethod
    def __a ( __UpperCamelCase ) -> Dict:
        """Register this command's subparser/arguments; must be overridden."""
        raise NotImplementedError

    @abstractmethod
    def __a ( self ) -> Optional[int]:
        """Run the command; must be overridden."""
        raise NotImplementedError
# --- non-code artifact from dataset export (was: "| 143 | 0 |") ---
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level constants for the CPM-Ant tokenizer.
# NOTE(review): previously all four values were assigned to the single name
# `UpperCamelCase__`, each overwriting the last, while the tokenizer class
# below reads `logger`, `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` — restored to those names.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1_0_2_4,
}
def load_vocab(vocab_file) -> "collections.OrderedDict":
    """Load a newline-delimited vocabulary file into an ordered token -> index map.

    NOTE(review): restored — the previous version never populated the dict
    (it enumerated the *filename* string and bound results to throwaway
    names) and returned an undefined `vocab`. The name `load_vocab` is
    grounded by the call in the tokenizer's ``__init__`` below.

    Args:
        vocab_file: path to a UTF-8 text file with one token per line.

    Returns:
        OrderedDict mapping each token (newline stripped) to its line index.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        vocab[token.rstrip("\n")] = index
    return vocab
class WordpieceTokenizer(object):
    """Greedy longest-match-first wordpiece tokenizer over a plain vocab mapping.

    NOTE(review): restored — the previous version never assigned its
    constructor arguments to ``self``, read undefined locals (`chars`,
    `sub_tokens`, `cur_substr`), appended the whole input token instead of the
    matched piece, inherited from an undefined base name, and named its method
    `lowercase_` while the tokenizer below calls
    ``self.wordpiece_tokenizer.tokenize(...)``.
    """

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Split ``token`` into the longest vocab pieces; unmatched chars become ``unk_token``."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            # Guard against pathologically long words: treat as a single unknown.
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Greedy: find the longest substring starting at `start` that is in the vocab.
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                # No piece matched: emit <unk> for this single character and advance.
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    """CPM-Ant tokenizer: jieba word segmentation followed by wordpiece lookup.

    NOTE(review): restored from a corrupted automated rewrite in which every
    method shared the name `lowercase_` (so only the last definition
    survived), sort keys were broken lambdas (``lambda _A: x[1]``), and most
    locals/attributes were never bound. Names follow the upstream CPM-Ant
    tokenizer; the ``PreTrainedTokenizer`` base is grounded by this file's
    imports. Verify behavior against the upstream module.

    Args:
        vocab_file: newline-delimited vocabulary file.
        bod_token / eod_token: begin/end-of-document markers.
        line_token / space_token: vocab sentinels remapped to "\\n" and " ".
        padding_side: "left" by default (decoder-style padding).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # The vocab file stores space/newline under sentinel tokens; remap them
        # to the literal characters that appear in segmented text.
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Segment with jieba, then wordpiece-split each segment."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Drop negative ids and special (pad/eos/bos) ids before decoding."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix=None):
        """Write the vocabulary back to disk, restoring the space/newline sentinels."""
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Undo the __init__ remapping so the saved file uses the sentinel tokens
        # again (presumably "</_>" and "</n>" — mirrors the constructor; verify).
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        """ Please check that the vocabulary is not corrupted!"""
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        """Prefix each sequence with the BOS token."""
        if token_ids_b is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_b

    def get_special_tokens_mask(self, token_ids_a, token_ids_b=None, already_has_special_tokens: bool = False):
        """Return 1 for special-token positions, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_b, already_has_special_tokens=True
            )
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_b))
        return [1] + ([0] * len(token_ids_a))
# --- non-code artifact from dataset export (was: "| 299 |") ---
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ ( __a ):
    # Dummy config used to exercise the AutoConfig/AutoModel registration API below.
    # NOTE(review): upstream this is `NewModelConfig(BertConfig)` with
    # `model_type = "new-model"`; the class/attribute names here look corrupted
    # by an automated rewrite (the later `NewModelConfig` reference in this file
    # resolves to nothing) — confirm against the upstream TF auto-model tests.
    lowerCAmelCase__ = 'new-model'
if is_tf_available():

    class lowerCamelCase_ ( __a ):
        # Dummy TF model paired with the config above (upstream: `TFNewModel(TFBertModel)`
        # with `config_class = NewModelConfig`).
        # NOTE(review): `NewModelConfig` is not defined under that name anywhere in this
        # file — presumably lost in an automated rename; verify before running.
        lowerCAmelCase__ = NewModelConfig
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
    '''Test-suite for the TF Auto* factory classes (from_pretrained / from_config / register).

    NOTE(review): this class appears corrupted by an automated rewrite — every
    method is named `lowercase_` (so earlier definitions are shadowed and never
    run), results are bound to throwaway names (`UpperCAmelCase__`), and many
    reads reference undefined names (`_A`, `model`, `counter`, `auto_classes`,
    `tiny_config`). The code is left byte-identical here; compare with the
    upstream tests/models/auto/test_modeling_tf_auto.py before relying on it.
    '''

    @slow
    def lowercase_ ( self : Tuple ):
        '''Load bert-base-cased via AutoConfig / TFAutoModel and type-check the results.'''
        UpperCAmelCase__ : List[str] = '''bert-base-cased'''
        UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A )
        self.assertIsNotNone(_A )
        self.assertIsInstance(_A , _A )
        UpperCAmelCase__ : Dict = TFAutoModel.from_pretrained(_A )
        self.assertIsNotNone(_A )
        self.assertIsInstance(_A , _A )

    @slow
    def lowercase_ ( self : int ):
        '''Load bert-base-cased via TFAutoModelForPreTraining.'''
        UpperCAmelCase__ : str = '''bert-base-cased'''
        UpperCAmelCase__ : Any = AutoConfig.from_pretrained(_A )
        self.assertIsNotNone(_A )
        self.assertIsInstance(_A , _A )
        UpperCAmelCase__ : List[str] = TFAutoModelForPreTraining.from_pretrained(_A )
        self.assertIsNotNone(_A )
        self.assertIsInstance(_A , _A )

    @slow
    def lowercase_ ( self : int ):
        '''Load a GPT-2 checkpoint via TFAutoModelForCausalLM (with and without loading info).'''
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )
            UpperCAmelCase__ : str = TFAutoModelForCausalLM.from_pretrained(_A )
            UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = TFAutoModelForCausalLM.from_pretrained(_A , output_loading_info=_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )

    @slow
    def lowercase_ ( self : List[Any] ):
        '''Load a BERT checkpoint via the legacy TFAutoModelWithLMHead.'''
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase__ : List[Any] = AutoConfig.from_pretrained(_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )
            UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )

    @slow
    def lowercase_ ( self : Optional[Any] ):
        '''Load a BERT checkpoint via TFAutoModelForMaskedLM (with and without loading info).'''
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )
            UpperCAmelCase__ : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_A )
            UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_A , output_loading_info=_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )

    @slow
    def lowercase_ ( self : Optional[int] ):
        '''Load a T5 checkpoint via TFAutoModelForSeqaSeqLM (with and without loading info).'''
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )
            UpperCAmelCase__ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(_A )
            UpperCAmelCase__ , UpperCAmelCase__ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(_A , output_loading_info=_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )

    @slow
    def lowercase_ ( self : Any ):
        '''Load bert-base-uncased via TFAutoModelForSequenceClassification.'''
        for model_name in ["bert-base-uncased"]:
            UpperCAmelCase__ : Any = AutoConfig.from_pretrained(_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )
            UpperCAmelCase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )

    @slow
    def lowercase_ ( self : Any ):
        '''Load bert-base-uncased via TFAutoModelForQuestionAnswering.'''
        for model_name in ["bert-base-uncased"]:
            UpperCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )
            UpperCAmelCase__ : Dict = TFAutoModelForQuestionAnswering.from_pretrained(_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )

    @slow
    @require_tensorflow_probability
    def lowercase_ ( self : Optional[int] ):
        '''Load a TAPAS checkpoint via TFAutoModelForTableQuestionAnswering.'''
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            UpperCAmelCase__ : List[str] = AutoConfig.from_pretrained(_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )
            UpperCAmelCase__ : List[str] = TFAutoModelForTableQuestionAnswering.from_pretrained(_A )
            UpperCAmelCase__ , UpperCAmelCase__ : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(
                _A , output_loading_info=_A )
            self.assertIsNotNone(_A )
            self.assertIsInstance(_A , _A )

    def lowercase_ ( self : Tuple ):
        '''Load the small test model by identifier and check its parameter count (14,410).'''
        # NOTE(review): `model` is read below but never bound — corrupted local names.
        UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A )
        self.assertIsInstance(_A , _A )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=_A ) , 14_410 )

    def lowercase_ ( self : Optional[int] ):
        '''Load the dummy-unknown identifier and check its parameter count (14,410).'''
        UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A )
        self.assertIsInstance(_A , _A )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=_A ) , 14_410 )

    def lowercase_ ( self : Dict ):
        '''Funnel: `architectures` tuple values must pick the right class in from_config/from_pretrained.'''
        UpperCAmelCase__ : int = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
        self.assertIsInstance(_A , _A )
        UpperCAmelCase__ : Any = copy.deepcopy(model.config )
        UpperCAmelCase__ : Tuple = ['''FunnelBaseModel''']
        UpperCAmelCase__ : int = TFAutoModel.from_config(_A )
        self.assertIsInstance(_A , _A )
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(_A )
            UpperCAmelCase__ : str = TFAutoModel.from_pretrained(_A )
            self.assertIsInstance(_A , _A )

    def lowercase_ ( self : Optional[int] ):
        '''Register a new config/model pair with every TFAuto* class, then clean up the registries.'''
        try:
            AutoConfig.register('''new-model''' , _A )
            UpperCAmelCase__ : List[Any] = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__ ):
                    # Wrong config class will raise an error
                    with self.assertRaises(_A ):
                        auto_class.register(_A , _A )
                    auto_class.register(_A , _A )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(_A ):
                        auto_class.register(_A , _A )
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    UpperCAmelCase__ : Tuple = BertModelTester(self ).get_config()
                    UpperCAmelCase__ : str = NewModelConfig(**tiny_config.to_dict() )
                    UpperCAmelCase__ : str = auto_class.from_config(_A )
                    self.assertIsInstance(_A , _A )
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(_A )
                        UpperCAmelCase__ : str = auto_class.from_pretrained(_A )
                        self.assertIsInstance(_A , _A )
        finally:
            # Always unregister so the global mappings are clean for other tests.
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def lowercase_ ( self : str ):
        '''A non-existent repo id must raise with a helpful message.'''
        with self.assertRaisesRegex(
            _A , '''bert-base is not a local folder and is not a valid model identifier''' ):
            UpperCAmelCase__ : Dict = TFAutoModel.from_pretrained('''bert-base''' )

    def lowercase_ ( self : Tuple ):
        '''A non-existent revision must raise with a helpful message.'''
        with self.assertRaisesRegex(
            _A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            UpperCAmelCase__ : int = TFAutoModel.from_pretrained(_A , revision='''aaaaaa''' )

    def lowercase_ ( self : Tuple ):
        '''A repo without model weights must raise with a helpful message.'''
        with self.assertRaisesRegex(
            _A , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
            UpperCAmelCase__ : List[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )

    def lowercase_ ( self : Optional[int] ):
        '''A PyTorch-only repo must suggest `from_pt=True`.'''
        with self.assertRaisesRegex(_A , '''Use `from_pt=True` to load this model''' ):
            UpperCAmelCase__ : int = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )

    def lowercase_ ( self : List[str] ):
        '''A second (cached) load must cost at most one HEAD request and no GET, sharded or not.'''
        UpperCAmelCase__ : List[str] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        with RequestCounter() as counter:
            UpperCAmelCase__ : Union[str, Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            self.assertEqual(counter.get_request_count , 0 )
            self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )

        # With a sharded checkpoint
        UpperCAmelCase__ : Optional[Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
        with RequestCounter() as counter:
            UpperCAmelCase__ : List[Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
            self.assertEqual(counter.get_request_count , 0 )
            self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )
# --- non-code artifact from dataset export (was: "| 299 | 1 |") ---
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    """Builds tiny DeiT configs and random inputs for the TF DeiT test suite.

    NOTE(review): restored from a corrupted automated rewrite — `__init__` had
    every parameter named `_A` (a SyntaxError), never assigned attributes to
    ``self``, and the config/inputs unpack was destroyed. The class name is
    grounded by the ``TFDeiTModelTester(self)`` call in the test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        # `num_labels` is accepted for signature compatibility but unused here:
        # classification checks read `type_sequence_label_size` instead.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
    """Model-level test suite for the TF DeiT family: config sanity, embedding/signature
    introspection, forward-shape checks and pretrained loading.

    NOTE(review): identifiers in this block are machine-scrambled. Every class
    attribute below is bound to the same name `__magic_name__`, so only the last
    assignment (False) survives at runtime; the test mixins presumably expect
    distinct names (all_model_classes, pipeline_model_mapping, test_* flags) —
    confirm against the upstream file. Method bodies also reference the undefined
    name `UpperCamelCase__`.
    """

    __magic_name__: List[Any] = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    __magic_name__: Optional[Any] = (
        {
            """feature-extraction""": TFDeiTModel,
            """image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )
    __magic_name__: Dict = False
    __magic_name__: Tuple = False
    __magic_name__: str = False
    __magic_name__: List[Any] = False

    def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
        """Build the model tester and a ConfigTester for the DeiT config."""
        snake_case_ : Optional[int] = TFDeiTModelTester(self )
        snake_case_ : Any = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )

    def UpperCAmelCase_ ( self : str ) -> Optional[int]:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='DeiT does not use inputs_embeds' )
    def UpperCAmelCase_ ( self : Optional[int] ) -> int:
        """Skipped: DeiT consumes pixel values, not input embeddings."""
        pass

    def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
        """Input embeddings must be a Keras layer; output embeddings, if any, a Dense layer."""
        snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ : Optional[Any] = model_class(UpperCamelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            snake_case_ : Optional[int] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCamelCase__ , tf.keras.layers.Dense ) )

    def UpperCAmelCase_ ( self : Dict ) -> Tuple:
        """The first positional argument of every model's call() must be `pixel_values`."""
        snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ : Optional[Any] = model_class(UpperCamelCase__ )
            snake_case_ : int = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case_ : Any = [*signature.parameters.keys()]
            snake_case_ : str = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )

    def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
        """Forward-shape check for the base model."""
        snake_case_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
        """Forward-shape check for the masked-image-modeling head."""
        snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )

    def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
        """Forward-shape check for the image-classification head."""
        snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )

    def UpperCAmelCase_ ( self : int , _A : Union[str, Any] , _A : List[str] , _A : List[Any]=False ) -> List[str]:
        """Prepare class-specific inputs; drop `labels` for models whose call() lacks it
        (e.g. the with-teacher variant)."""
        snake_case_ : Optional[Any] = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
                del inputs_dict["labels"]
        return inputs_dict

    @slow
    def UpperCAmelCase_ ( self : Any ) -> Optional[Any]:
        """Smoke-test loading the first published checkpoint from the hub."""
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case_ : Any = TFDeiTModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( ):
    """Load the standard COCO cats fixture image used by the vision integration tests.

    NOTE(review): the original bound the image to a throwaway name and returned the
    undefined name `image`; fixed to return the opened image.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Integration test: run the distilled DeiT classifier on a real image and compare
    logits against reference values.

    NOTE(review): both methods were mangled to the same name (the second shadowed the
    first) even though the test body calls `self.default_image_processor`; method names
    restored so the property resolves and unittest discovers the test.
    """

    @cached_property
    def default_image_processor(self):
        """Image processor for the checkpoint, or None when vision deps are missing."""
        return (
            DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        """End-to-end forward pass; verifies logits shape and the first three values."""
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
        image_processor = self.default_image_processor
        # SCREAMING_SNAKE_CASE__() is this module's (scrambled) prepare_img helper.
        image = SCREAMING_SNAKE_CASE__()
        inputs = image_processor(images=image , return_tensors='tf' )

        # forward pass
        outputs = model(**inputs )

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 327 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import table for the LayoutXLM package: submodule name -> exported symbols.
# NOTE(review): the original assigned every structure to throwaway scrambled names,
# then passed the undefined name `_import_structure` to _LazyModule and dropped the
# lazy module on the floor; restored the canonical transformers __init__ pattern.
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Slow tokenizer is only exposed when sentencepiece is installed.
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fast tokenizer requires the Rust-backed `tokenizers` package.
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy so optional heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48 | 0 |
"""simple docstring"""
def lowerCamelCase_ ( UpperCamelCase__ : int = 1000 ):
'''simple docstring'''
UpperCamelCase__ = 2**power
UpperCamelCase__ = 0
while n:
UpperCamelCase__ , UpperCamelCase__ = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Return True if `color` can be assigned to a vertex with adjacency row `neighbours`,
    i.e. no already-colored neighbour uses the same color."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours) )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Backtracking helper: try to color vertices from `index` onward; mutates
    `colored_vertices` in place and returns True on success."""
    # Base Case: every vertex has been assigned a color.
    if index == len(graph ):
        return True

    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index], colored_vertices, i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def lowerCamelCase_ ( graph: list[list[int]], max_colors: int ) -> list[int]:
    """Graph m-coloring entry point: return a valid coloring (list of color indices)
    for the adjacency-matrix `graph` using at most `max_colors` colors, or [] if
    none exists.

    NOTE(review): the original defined all three functions under one scrambled name
    while their bodies called `valid_coloring`/`util_color`; the helpers were
    restored to those names and the surviving public name kept for the entry point.
    """
    colored_vertices = [-1] * len(graph )

    if util_color(graph, max_colors, colored_vertices, 0 ):
        return colored_vertices
    return []
| 35 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)

# NOTE(review): every module constant below is bound to the same scrambled name
# `__lowerCamelCase`, so only the last assignment survives at runtime; the
# tokenizer class presumably expects the distinct upstream names
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, ...) — confirm upstream.

# File name of the SentencePiece model expected inside a checkpoint directory.
__lowerCamelCase : Tuple = {'''vocab_file''': '''spm_char.model'''}

# Hub download URLs of the vocab file for each published SpeechT5 checkpoint.
__lowerCamelCase : Dict = {
    '''vocab_file''': {
        '''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
        '''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
        '''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
    }
}

# Maximum model input length per checkpoint.
__lowerCamelCase : List[str] = {
    '''microsoft/speecht5_asr''': 10_24,
    '''microsoft/speecht5_tts''': 10_24,
    '''microsoft/speecht5_vc''': 10_24,
}
class a__ ( A__ ):
    """Character-level SentencePiece tokenizer (SpeechT5-style, vocab file spm_char.model).

    NOTE(review): in the scrambled original all eight methods shared one mangled name
    (each definition shadowed the previous), class attributes were all bound to `A`,
    and several signatures used the duplicate parameter name `_A` (a SyntaxError).
    Method/attribute names were restored to the PreTrainedTokenizer contract that the
    bodies clearly implement.
    """

    # Attribute names required by the PreTrainedTokenizer base class.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """Load the SentencePiece model from `vocab_file` and register special tokens."""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )

    @property
    def vocab_size(self):
        """Size of the SentencePiece vocabulary."""
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Return token -> id mapping, including tokens added after loading."""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs" ):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _tokenize(self, text: str):
        """Tokenize `text` into SentencePiece pieces."""
        return self.sp_model.encode(text, out_type=str )

    def _convert_token_to_id(self, token):
        """Map a piece to its vocabulary id."""
        return self.sp_model.piece_to_id(token )

    def _convert_id_to_token(self, index):
        """Map a vocabulary id back to its piece."""
        token = self.sp_model.IdToPiece(index )
        return token

    def convert_tokens_to_string(self, tokens):
        """Join pieces into a string, decoding runs between special tokens separately."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        """Append EOS to a sequence (or a concatenated pair, kept for API consistency)."""
        if token_ids_b is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_b + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a 0/1 mask marking special tokens (the trailing EOS) in the sequence."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_b, already_has_special_tokens=already_has_special_tokens )

        suffix_ones = [1]
        if token_ids_b is None:
            return ([0] * len(token_ids_a )) + suffix_ones
        return ([0] * len(token_ids_a )) + ([0] * len(token_ids_b )) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Copy (or serialize) the SentencePiece model file into `save_directory`.

        Returns a 1-tuple with the written path, or None when the directory is invalid.
        """
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            # No file on disk (e.g. loaded from serialized proto): write the proto out.
            with open(out_vocab_file, "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )

        return (out_vocab_file,)
| 18 | def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = int(lowerCAmelCase )
if decimal in (0, 1): # Exit cases for the recursion
return str(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = divmod(lowerCAmelCase , 2 )
return binary_recursive(lowerCAmelCase ) + str(lowerCAmelCase )
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = str(lowerCAmelCase ).strip()
if not number:
raise ValueError("No input value was provided" )
SCREAMING_SNAKE_CASE_ : List[str] = "-" if number.startswith("-" ) else ""
SCREAMING_SNAKE_CASE_ : Optional[Any] = number.lstrip("-" )
if not number.isnumeric():
raise ValueError("Input value is not an integer" )
return f'{negative}0b{binary_recursive(int(lowerCAmelCase ) )}'
if __name__ == "__main__":
from doctest import testmod
testmod()
| 18 | 1 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
# Sampling hyper-parameters for the value-guided planner.
config = {
    "n_samples": 6_4,
    "horizon": 3_2,
    "num_inference_steps": 2_0,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    # NOTE(review): the scrambled original assigned every value to one throwaway name
    # and then read undefined names (`denorm_actions`, `next_observation`, `T`, ...);
    # variable names restored so the rollout loop actually runs.
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1_0_0_0
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=3_2)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
                f' {total_score}'
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f'Total reward: {total_reward}')
| 366 |
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
# File name under which a scheduler's configuration is saved/loaded.
SCREAMING_SNAKE_CASE__ = 'scheduler_config.json'
class a_ ( lowerCamelCase ):
    """Closed set of scheduler identifiers, numbered 1-14.

    NOTE(review): every member is bound to the same scrambled name `lowercase`, so
    only the final assignment (14) survives at runtime; the original presumably gave
    each value a distinct member name — confirm upstream.
    """

    lowercase = 1
    lowercase = 2
    lowercase = 3
    lowercase = 4
    lowercase = 5
    lowercase = 6
    lowercase = 7
    lowercase = 8
    lowercase = 9
    lowercase = 10
    lowercase = 11
    lowercase = 12
    lowercase = 13
    lowercase = 14
@dataclass
class a_ ( lowerCamelCase ):
    """Output container returned by a scheduler step.

    NOTE(review): the single field's name was scrambled to `lowercase`; it is
    presumably the previous-timestep sample tensor — confirm upstream.
    """

    lowercase = 42
class a_ :
    """Scheduler base mixin: config-file persistence (load/save) and discovery of
    compatible scheduler classes.

    NOTE(review): identifiers are machine-scrambled — signatures below repeat the
    parameter name `_SCREAMING_SNAKE_CASE` (a SyntaxError as written) and the class
    attributes all share the name `lowercase` (only the last binding survives).
    Left byte-identical pending recovery of the upstream names.
    """

    lowercase = SCHEDULER_CONFIG_NAME
    lowercase = []
    lowercase = True

    @classmethod
    def A__ ( cls , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
        """Instantiate a scheduler from a saved config (hub id or local directory)."""
        UpperCamelCase ,UpperCamelCase ,UpperCamelCase = cls.load_config(
            pretrained_model_name_or_path=_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE , return_unused_kwargs=_SCREAMING_SNAKE_CASE , return_commit_hash=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
        return cls.from_config(_SCREAMING_SNAKE_CASE , return_unused_kwargs=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )

    def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False , **_SCREAMING_SNAKE_CASE ) -> Dict:
        """Write this scheduler's config to a directory (optionally pushing to the hub)."""
        self.save_config(save_directory=_SCREAMING_SNAKE_CASE , push_to_hub=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )

    @property
    def A__ ( self ) -> Tuple:
        """List of scheduler classes that can be swapped in for this one."""
        return self._get_compatibles()

    @classmethod
    def A__ ( cls ) -> List[Any]:
        """Resolve compatible class names (plus this class) to actual classes from the
        top-level package; names that do not exist are silently skipped."""
        UpperCamelCase = list(set([cls.__name__] + cls._compatibles ) )
        UpperCamelCase = importlib.import_module(__name__.split(""".""" )[0] )
        UpperCamelCase = [
            getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for c in compatible_classes_str if hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        ]
        return compatible_classes
| 183 | 0 |
import numpy as np


def SCREAMING_SNAKE_CASE ( vector: np.ndarray ) -> np.ndarray:
    """Elementwise hyperbolic tangent via the logistic identity tanh(x) = 2*sigmoid(2x) - 1.

    NOTE(review): the original parameter name was scrambled while the body used
    `vector`; the parameter was renamed to match the body.
    """
    return (2 / (1 + np.exp(-2 * vector ))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCamelCase( UpperCAmelCase_ ):
warnings.warn(
'The preprocess method is deprecated and will be removed in a future version. Please'
' use VaeImageProcessor.preprocess instead' , UpperCAmelCase_ , )
if isinstance(UpperCAmelCase_ , torch.Tensor ):
return image
elif isinstance(UpperCAmelCase_ , PIL.Image.Image ):
UpperCAmelCase : List[str] = [image]
if isinstance(image[0] , PIL.Image.Image ):
UpperCAmelCase , UpperCAmelCase : List[str] = image[0].size
UpperCAmelCase , UpperCAmelCase : str = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
UpperCAmelCase : str = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
UpperCAmelCase : Optional[int] = np.concatenate(UpperCAmelCase_ , axis=0 )
UpperCAmelCase : List[Any] = np.array(UpperCAmelCase_ ).astype(np.floataa ) / 255.0
UpperCAmelCase : Any = image.transpose(0 , 3 , 1 , 2 )
UpperCAmelCase : Any = 2.0 * image - 1.0
UpperCAmelCase : List[str] = torch.from_numpy(UpperCAmelCase_ )
elif isinstance(image[0] , torch.Tensor ):
UpperCAmelCase : List[Any] = torch.cat(UpperCAmelCase_ , dim=0 )
return image
def UpperCamelCase( UpperCAmelCase_ ):
if isinstance(UpperCAmelCase_ , torch.Tensor ):
return mask
elif isinstance(UpperCAmelCase_ , PIL.Image.Image ):
UpperCAmelCase : List[str] = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
UpperCAmelCase , UpperCAmelCase : List[Any] = mask[0].size
UpperCAmelCase , UpperCAmelCase : Dict = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
UpperCAmelCase : Tuple = [np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask]
UpperCAmelCase : Optional[int] = np.concatenate(UpperCAmelCase_ , axis=0 )
UpperCAmelCase : Any = mask.astype(np.floataa ) / 255.0
UpperCAmelCase : str = 0
UpperCAmelCase : Dict = 1
UpperCAmelCase : Optional[Any] = torch.from_numpy(UpperCAmelCase_ )
elif isinstance(mask[0] , torch.Tensor ):
UpperCAmelCase : List[str] = torch.cat(UpperCAmelCase_ , dim=0 )
return mask
class A_ ( _snake_case ):
    """RePaint-style inpainting pipeline: iteratively denoises random noise with a UNet
    while the RePaint scheduler re-injects known (unmasked) pixels, including reverse
    "undo" steps for resampling.

    NOTE(review): identifiers are machine-scrambled — both methods repeat the
    parameter name `lowercase_` (a SyntaxError as written) and locals are bound to
    `UpperCAmelCase`; left byte-identical pending recovery of the upstream names.
    """

    UpperCAmelCase_ : UNetaDModel
    UpperCAmelCase_ : RePaintScheduler

    def __init__( self : List[str] , lowercase_ : List[str] , lowercase_ : Tuple ) -> Tuple:
        super().__init__()
        # Register UNet and scheduler so save/load and .to() handle them.
        self.register_modules(unet=lowercase_ , scheduler=lowercase_ )

    @torch.no_grad()
    def __call__( self : List[str] , lowercase_ : Union[torch.Tensor, PIL.Image.Image] , lowercase_ : Union[torch.Tensor, PIL.Image.Image] , lowercase_ : int = 250 , lowercase_ : float = 0.0 , lowercase_ : int = 10 , lowercase_ : int = 10 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
        """Run RePaint inpainting; returns generated images (PIL by default)."""
        UpperCAmelCase : Dict = image
        # Normalize image and mask to [-1,1] / binary tensors on the pipeline device.
        UpperCAmelCase : Optional[int] = _preprocess_image(lowercase_ )
        UpperCAmelCase : Optional[Any] = original_image.to(device=self.device , dtype=self.unet.dtype )
        UpperCAmelCase : List[str] = _preprocess_mask(lowercase_ )
        UpperCAmelCase : Any = mask_image.to(device=self.device , dtype=self.unet.dtype )

        UpperCAmelCase : Any = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )

        UpperCAmelCase : List[str] = original_image.shape
        UpperCAmelCase : str = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=self.unet.dtype )

        # set step values
        self.scheduler.set_timesteps(lowercase_ , lowercase_ , lowercase_ , self.device )
        UpperCAmelCase : Tuple = eta

        UpperCAmelCase : Optional[int] = self.scheduler.timesteps[0] + 1
        UpperCAmelCase : List[Any] = generator[0] if isinstance(lowercase_ , lowercase_ ) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            if t < t_last:
                # predict the noise residual
                UpperCAmelCase : str = self.unet(lowercase_ , lowercase_ ).sample
                # compute previous image: x_t -> x_t-1
                UpperCAmelCase : Dict = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ).prev_sample

            else:
                # compute the reverse: x_t-1 -> x_t
                UpperCAmelCase : int = self.scheduler.undo_step(lowercase_ , lowercase_ , lowercase_ )
            UpperCAmelCase : Union[str, Any] = t

        # Map latents from [-1,1] back to [0,1] and to NHWC numpy for output.
        UpperCAmelCase : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
        UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            UpperCAmelCase : List[str] = self.numpy_to_pil(lowercase_ )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=lowercase_ )
| 151 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
    """Fast (CPU, tiny-model) tests for StableDiffusionPanoramaPipeline: default output,
    batching, negative prompts, view batching, and alternative schedulers.

    NOTE(review): identifiers are machine-scrambled — every local is rebound to
    `_snake_case` and every method shares the name `lowerCAmelCase` (later defs shadow
    earlier ones), and bodies read the original un-mangled names (`unet`, `sd_pipe`,
    `image`, ...) which are undefined here. Left byte-identical pending recovery of
    the upstream names.
    """

    lowerCAmelCase_ = StableDiffusionPanoramaPipeline
    lowerCAmelCase_ = TEXT_TO_IMAGE_PARAMS
    lowerCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
    lowerCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
    lowerCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS

    def lowerCAmelCase ( self ) -> List[Any]:
        """Build tiny UNet/VAE/CLIP components for fast pipeline construction."""
        torch.manual_seed(0 )
        _snake_case = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        _snake_case = DDIMScheduler()
        torch.manual_seed(0 )
        _snake_case = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        _snake_case = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        _snake_case = CLIPTextModel(lowerCAmelCase_ )
        _snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        _snake_case = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Union[str, Any]:
        """Deterministic call kwargs for the pipeline (seeded generator, 1 step)."""
        _snake_case = torch.manual_seed(lowerCAmelCase_ )
        _snake_case = {
            'prompt': 'a photo of the dolomites',
            'generator': generator,
            # Setting height and width to None to prevent OOMs on CPU.
            'height': None,
            'width': None,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def lowerCAmelCase ( self ) -> int:
        """Default run: check output shape and a reference pixel slice."""
        _snake_case = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        _snake_case = self.get_dummy_components()
        _snake_case = StableDiffusionPanoramaPipeline(**lowerCAmelCase_ )
        _snake_case = sd_pipe.to(lowerCAmelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )

        _snake_case = self.get_dummy_inputs(lowerCAmelCase_ )
        _snake_case = sd_pipe(**lowerCAmelCase_ ).images
        _snake_case = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        _snake_case = np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def lowerCAmelCase ( self ) -> Optional[int]:
        """Batch-consistency check inherited from the common tester."""
        super().test_inference_batch_consistent(batch_sizes=[1, 2] )

    def lowerCAmelCase ( self ) -> Optional[Any]:
        """Batched vs single inference must agree within tolerance."""
        super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )

    def lowerCAmelCase ( self ) -> Tuple:
        """Negative prompt path: check shape and reference slice."""
        _snake_case = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        _snake_case = self.get_dummy_components()
        _snake_case = StableDiffusionPanoramaPipeline(**lowerCAmelCase_ )
        _snake_case = sd_pipe.to(lowerCAmelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )

        _snake_case = self.get_dummy_inputs(lowerCAmelCase_ )
        _snake_case = 'french fries'
        _snake_case = sd_pipe(**lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ )
        _snake_case = output.images
        _snake_case = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        _snake_case = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def lowerCAmelCase ( self ) -> Optional[Any]:
        """view_batch_size > 1 must match the single-view reference slice."""
        _snake_case = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        _snake_case = self.get_dummy_components()
        _snake_case = StableDiffusionPanoramaPipeline(**lowerCAmelCase_ )
        _snake_case = sd_pipe.to(lowerCAmelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )

        _snake_case = self.get_dummy_inputs(lowerCAmelCase_ )
        _snake_case = sd_pipe(**lowerCAmelCase_ , view_batch_size=2 )
        _snake_case = output.images
        _snake_case = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        _snake_case = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def lowerCAmelCase ( self ) -> Union[str, Any]:
        """Euler-ancestral scheduler variant: check shape and reference slice."""
        _snake_case = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        _snake_case = self.get_dummy_components()
        _snake_case = EulerAncestralDiscreteScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' )
        _snake_case = StableDiffusionPanoramaPipeline(**lowerCAmelCase_ )
        _snake_case = sd_pipe.to(lowerCAmelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )

        _snake_case = self.get_dummy_inputs(lowerCAmelCase_ )
        _snake_case = sd_pipe(**lowerCAmelCase_ ).images
        _snake_case = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        _snake_case = np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def lowerCAmelCase ( self ) -> Tuple:
        """PNDM scheduler variant: check shape and reference slice."""
        _snake_case = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        _snake_case = self.get_dummy_components()
        _snake_case = PNDMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , skip_prk_steps=lowerCAmelCase_ )
        _snake_case = StableDiffusionPanoramaPipeline(**lowerCAmelCase_ )
        _snake_case = sd_pipe.to(lowerCAmelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )

        _snake_case = self.get_dummy_inputs(lowerCAmelCase_ )
        _snake_case = sd_pipe(**lowerCAmelCase_ ).images
        _snake_case = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        _snake_case = np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
def lowerCAmelCase ( self ) -> Union[str, Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self , lowerCAmelCase_=0 ) -> List[Any]:
_snake_case = torch.manual_seed(lowerCAmelCase_ )
_snake_case = {
'prompt': 'a photo of the dolomites',
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowerCAmelCase ( self ) -> Any:
_snake_case = 'stabilityai/stable-diffusion-2-base'
_snake_case = DDIMScheduler.from_pretrained(lowerCAmelCase_ , subfolder='scheduler' )
_snake_case = StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase_ , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
_snake_case = self.get_inputs()
_snake_case = pipe(**lowerCAmelCase_ ).images
_snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_snake_case = np.array(
[
0.36_96_83_92,
0.27_02_53_72,
0.32_44_67_66,
0.28_37_93_87,
0.36_36_32_74,
0.30_73_33_47,
0.27_10_00_27,
0.27_05_41_25,
0.25_53_60_96,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def lowerCAmelCase ( self ) -> Any:
_snake_case = StableDiffusionPanoramaPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-base' , safety_checker=lowerCAmelCase_ )
_snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
_snake_case = self.get_inputs()
_snake_case = pipe(**lowerCAmelCase_ ).images
_snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_snake_case = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCAmelCase ( self ) -> int:
_snake_case = 0
def callback_fn(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
_snake_case = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_snake_case = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_snake_case = latents[0, -3:, -3:, -1]
_snake_case = np.array(
[
0.18_68_18_69,
0.33_90_78_16,
0.5_36_12_76,
0.14_43_28_65,
-0.02_85_66_11,
-0.73_94_11_23,
0.23_39_79_87,
0.47_32_26_82,
-0.37_82_31_64,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
_snake_case = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_snake_case = latents[0, -3:, -3:, -1]
_snake_case = np.array(
[
0.18_53_96_45,
0.33_98_72_48,
0.5_37_85_59,
0.14_43_71_42,
-0.02_45_52_61,
-0.7_33_83_17,
0.23_99_07_55,
0.47_35_62_72,
-0.3_78_65_05,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
_snake_case = False
_snake_case = 'stabilityai/stable-diffusion-2-base'
_snake_case = DDIMScheduler.from_pretrained(lowerCAmelCase_ , subfolder='scheduler' )
_snake_case = StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase_ , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ )
_snake_case = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
_snake_case = self.get_inputs()
pipe(**lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCAmelCase ( self ) -> Dict:
"""Runs the panorama pipeline with attention slicing + sequential CPU offload and checks peak GPU memory stays under budget."""
# reset CUDA memory accounting so the measurement below reflects only this run
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_snake_case = 'stabilityai/stable-diffusion-2-base'
_snake_case = DDIMScheduler.from_pretrained(lowerCAmelCase_ , subfolder='scheduler' )
_snake_case = StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase_ , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ )
_snake_case = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
# slice attention one head at a time and offload submodules to CPU to minimize peak VRAM
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_snake_case = self.get_inputs()
_snake_case = pipe(**lowerCAmelCase_ )
_snake_case = torch.cuda.max_memory_allocated()
# make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 295 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# Module-level doc constants. The original obfuscation assigned all of these to the
# same name (each shadowing the previous), while the decorated forward methods below
# reference `_CHECKPOINT_FOR_DOC`, `_CONFIG_FOR_DOC`, etc. — restore the real names.

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    """Convolution -> BatchNorm -> activation block used throughout ResNet.

    Fixes: the obfuscated original declared every parameter as the same name
    (a SyntaxError), referenced undefined `kernel_size`/`activation`, and used
    non-existent `nn.Convad`/`nn.BatchNormad` (really `nn.Conv2d`/`nn.BatchNorm2d`).
    """

    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ) -> None:
        super().__init__()
        # padding = kernel_size // 2 keeps "same" spatial size for odd kernels; only `stride` shrinks the map.
        # bias is disabled because BatchNorm immediately follows.
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        # activation=None disables the nonlinearity (used on the last conv of residual branches)
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """ResNet stem: a 7x7 stride-2 convolution followed by 3x3 stride-2 max pooling.

    Restores the real class name (referenced by `ResNetModel`/`ResNetBackbone`
    below) and the real `nn.MaxPool2d` API.
    """

    def __init__(self, config) -> None:
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # remembered so forward() can validate the input's channel dimension
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.'
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """1x1 strided convolution + batch norm projecting residual inputs to the right shape.

    Restores the real class name (referenced by the residual layers below) and
    the real `nn.Conv2d`/`nn.BatchNorm2d` APIs; conv bias off since BN follows.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2) -> None:
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """Classic ResNet residual block: two 3x3 convolutions with an optional projection shortcut.

    The shortcut projects the residual only when the shape changes (channel count
    or stride); the second conv has no activation — it is applied after the add.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu") -> None:
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state: Tensor) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """Bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand, plus optional shortcut.

    The first 1x1 conv reduces channels by `reduction` (default 4) so the 3x3
    conv runs on a narrower tensor; the final 1x1 conv restores `out_channels`
    with no activation (applied after the residual add).
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ) -> None:
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state: Tensor) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """One ResNet stage: `depth` stacked residual layers.

    Layer type is chosen from the config ("bottleneck" vs basic); only the
    first layer may downsample via `stride`.
    """

    def __init__(self, config, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2) -> None:
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    """Stack of ResNet stages, optionally collecting all intermediate hidden states."""

    def __init__(self, config) -> None:
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        # consecutive (in, out) channel pairs for the remaining stages
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
        # hidden_states collects the input of each stage plus the final output
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """Base class handling weight initialization and pretrained-checkpoint loading.

    Restores the undefined obfuscated base (`_lowerCamelCase`) to the imported
    `PreTrainedModel` and the undefined `module`/`value` parameter names.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module) -> None:
        # He initialization for convolutions (relu fan-out), unit-gain norms
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False) -> None:
        # only the encoder supports gradient checkpointing
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
# Docstring fragments injected by the `add_start_docstrings*` decorators on the
# model classes below. The original obfuscation assigned both to the same name.
RESNET_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    '''The bare ResNet model outputting raw features without any specific head on top.''',
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    """Bare ResNet: embeddings + encoder, returning feature maps and a pooled output."""

    def __init__(self, config) -> None:
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # global average pool to a 1x1 map; downstream heads flatten it
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        # fall back to the config-level flags when the caller does not override them
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    '''
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''',
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    """ResNet with a linear classification head on the pooled features."""

    def __init__(self, config) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head: flatten the pooled (1x1) feature map, then project to the label space;
        # with num_labels == 0 the head degenerates to an identity on the flattened features
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[Tensor] = None,
        labels: Optional[Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # infer the problem type once from label dtype/num_labels (standard transformers convention)
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    '''
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    ''',
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    """ResNet as a feature-pyramid backbone: returns the feature maps of the requested stages."""

    def __init__(self, config) -> None:
        super().__init__(config)
        # BackboneMixin bookkeeping (stage_names / out_features)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        # always request all hidden states internally; stage selection happens below
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
| 295 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
# Emit INFO-level progress messages while converting checkpoints.
logging.set_verbosity_info()
UpperCamelCase__: Optional[int] = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    """Build a hybrid-backbone `DPTConfig` matching the given original checkpoint URL.

    Returns:
        (config, expected_shape): the model config and the output shape used for
        post-conversion sanity checks.

    Restores the real function name (it is called as `get_dpt_config` inside
    `convert_dpt_checkpoint`) and fixes a real bug: the original condition
    `if "nyu" or "midas" in checkpoint_url:` is always true, because the
    non-empty literal "nyu" is truthy — it therefore clobbered the "large"
    settings for every checkpoint.
    """
    config = DPTConfig(embedding_type='''hybrid''')
    # default verification shape; overridden by the specialized branches below
    expected_shape = (1, 384, 384)
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    # FIX: test each keyword against the URL explicitly instead of `"nyu" or ...`
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = '''project'''
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = '''huggingface/label-files'''
        filename = '''ade20k-id2label.json'''
        # label mapping is fetched from the HF hub label-files dataset
        idalabel = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='''dataset''')), '''r'''))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    """Drop the original classification-head tensors (not used by the HF model), in place.

    Restores the real function name (called as `remove_ignore_keys_` inside
    `convert_dpt_checkpoint`); uses `pop(k, None)` so absent keys are ignored.
    """
    ignore_keys = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    """Translate one original DPT checkpoint key into the Transformers naming scheme.

    Pure string rewriting; the replacement order matters (e.g. `attn.proj` must be
    handled before the generic `proj` rule). Restores the real callable name so
    the rename loop in `convert_dpt_checkpoint` can use it.
    """
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('''pretrained.model''', '''dpt.encoder''')
    if "pretrained.model" in name:
        name = name.replace('''pretrained.model''', '''dpt.embeddings''')
    if "patch_embed" in name:
        name = name.replace('''patch_embed''', '''''')
    if "pos_embed" in name:
        name = name.replace('''pos_embed''', '''position_embeddings''')
    if "attn.proj" in name:
        name = name.replace('''attn.proj''', '''attention.output.dense''')
    if "proj" in name and "project" not in name:
        name = name.replace('''proj''', '''projection''')
    if "blocks" in name:
        name = name.replace('''blocks''', '''layer''')
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''', '''intermediate.dense''')
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''', '''output.dense''')
    if "norm1" in name and "backbone" not in name:
        name = name.replace('''norm1''', '''layernorm_before''')
    if "norm2" in name and "backbone" not in name:
        name = name.replace('''norm2''', '''layernorm_after''')
    if "scratch.output_conv" in name:
        name = name.replace('''scratch.output_conv''', '''head''')
    if "scratch" in name:
        name = name.replace('''scratch''', '''neck''')
    if "layer1_rn" in name:
        name = name.replace('''layer1_rn''', '''convs.0''')
    if "layer2_rn" in name:
        name = name.replace('''layer2_rn''', '''convs.1''')
    if "layer3_rn" in name:
        name = name.replace('''layer3_rn''', '''convs.2''')
    if "layer4_rn" in name:
        name = name.replace('''layer4_rn''', '''convs.3''')
    if "refinenet" in name:
        layer_idx = int(name[len('''neck.refinenet''') : len('''neck.refinenet''') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"""refinenet{layer_idx}""", f"""fusion_stage.layers.{abs(layer_idx-4 )}""")
    if "out_conv" in name:
        name = name.replace('''out_conv''', '''projection''')
    if "resConfUnit1" in name:
        name = name.replace('''resConfUnit1''', '''residual_layer1''')
    if "resConfUnit2" in name:
        name = name.replace('''resConfUnit2''', '''residual_layer2''')
    if "conv1" in name:
        name = name.replace('''conv1''', '''convolution1''')
    if "conv2" in name:
        name = name.replace('''conv2''', '''convolution2''')
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess1.0.project.0''', '''neck.reassemble_stage.readout_projects.0.0''')
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess2.0.project.0''', '''neck.reassemble_stage.readout_projects.1.0''')
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess3.0.project.0''', '''neck.reassemble_stage.readout_projects.2.0''')
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess4.0.project.0''', '''neck.reassemble_stage.readout_projects.3.0''')
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('''pretrained.act_postprocess1.3''', '''neck.reassemble_stage.layers.0.projection''')
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('''pretrained.act_postprocess1.4''', '''neck.reassemble_stage.layers.0.resize''')
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('''pretrained.act_postprocess2.3''', '''neck.reassemble_stage.layers.1.projection''')
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('''pretrained.act_postprocess2.4''', '''neck.reassemble_stage.layers.1.resize''')
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('''pretrained.act_postprocess3.3''', '''neck.reassemble_stage.layers.2.projection''')
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('''pretrained.act_postprocess4.3''', '''neck.reassemble_stage.layers.3.projection''')
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('''pretrained.act_postprocess4.4''', '''neck.reassemble_stage.layers.3.resize''')
    if "pretrained" in name:
        name = name.replace('''pretrained''', '''dpt''')
    if "bn" in name:
        name = name.replace('''bn''', '''batch_norm''')
    if "head" in name:
        name = name.replace('''head''', '''head.head''')
    if "encoder.norm" in name:
        name = name.replace('''encoder.norm''', '''layernorm''')
    if "auxlayer" in name:
        name = name.replace('''auxlayer''', '''auxiliary_head.head''')
    if "backbone" in name:
        name = name.replace('''backbone''', '''backbone.bit.encoder''')
    if ".." in name:
        name = name.replace('''..''', '''.''')
    if "stem.conv" in name:
        name = name.replace('''stem.conv''', '''bit.embedder.convolution''')
    if "blocks" in name:
        name = name.replace('''blocks''', '''layers''')
    if "convolution" in name and "backbone" in name:
        name = name.replace('''convolution''', '''conv''')
    if "layer" in name and "backbone" in name:
        name = name.replace('''layer''', '''layers''')
    if "backbone.bit.encoder.bit" in name:
        name = name.replace('''backbone.bit.encoder.bit''', '''backbone.bit''')
    if "embedder.conv" in name:
        name = name.replace('''embedder.conv''', '''embedder.convolution''')
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace('''backbone.bit.encoder.stem.norm''', '''backbone.bit.embedder.norm''')
    return name
def read_in_q_k_v(state_dict, config):
    """Split each fused qkv projection in `state_dict` into separate query/key/value tensors, in place.

    The original (timm-style) checkpoint stores one (3*hidden, hidden) matrix and a
    (3*hidden,) bias per layer; Transformers expects three separate projections.
    Restores the real function name (called as `read_in_q_k_v` in
    `convert_dpt_checkpoint`).
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    """Download the standard COCO cats image used to sanity-check model conversions.

    Restores the real function name (called as `prepare_img` inside
    `convert_dpt_checkpoint`). Performs network I/O via `requests`.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    # stream=True hands the raw response to PIL without buffering the whole body first
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    """Copy/paste/tweak an original DPT checkpoint into a Transformers model.

    Restores the real function name: the `__main__` guard at the bottom of this
    file calls `convert_dpt_checkpoint(...)`, which was never defined under the
    obfuscated names. Side effects: loads weights from `checkpoint_url`, optionally
    shows a prediction, saves to `pytorch_dump_folder_path`, and/or pushes to the hub.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location='''cpu''')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if '''ade''' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if '''ade''' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors='''pt''')
    # forward pass
    outputs = model(**encoding).logits if '''ade''' in checkpoint_url else model(**encoding).predicted_depth
    if show_prediction:
        # upsample to the input image's resolution for visual inspection
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode='''bicubic''',
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"""Saving model to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving image processor to {pytorch_dump_folder_path}""")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub('''ybelkada/dpt-hybrid-midas''')
        image_processor.push_to_hub('''ybelkada/dpt-hybrid-midas''')
if __name__ == "__main__":
UpperCamelCase__: Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
UpperCamelCase__: Tuple = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 23 |
'''simple docstring'''
from manim import *
class SCREAMING_SNAKE_CASE( A__ ):
"""Manim scene animating how a sharded checkpoint is loaded: CPU/GPU/model memory blocks are drawn, then checkpoint cells grow and move into the CPU columns."""
# NOTE(review): class-private `__snake_case` references below are name-mangled
# obfuscation leftovers — the originals were distinct names (direction constants
# such as RIGHT/DOWN and local variables); confirm against the upstream scene
# before executing.
def A ( self : Union[str, Any] ) -> List[str]:
# base memory "cell" rectangle and its borderless inner fill
UpperCAmelCase : Optional[Any] = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
# CPU: two columns of six cells with a label, placed left of center
UpperCAmelCase : Tuple = [mem.copy() for i in range(6 )]
UpperCAmelCase : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase : Dict = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : Any = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : Union[str, Any] = VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : Optional[Any] = Text('''CPU''' , font_size=24 )
UpperCAmelCase : Union[str, Any] = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
# GPU: a row of four cells with a label
UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase : Union[str, Any] = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : List[str] = Text('''GPU''' , font_size=24 )
UpperCAmelCase : Dict = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
# Model: a row of six cells with a label, placed right of center
UpperCAmelCase : int = [mem.copy() for i in range(6 )]
UpperCAmelCase : Union[str, Any] = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : List[str] = Text('''Model''' , font_size=24 )
UpperCAmelCase : Tuple = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
# Small "target" rectangles anchored near the CPU columns; they mark where
# checkpoint shards will land during the second animation phase.
UpperCAmelCase : Any = []
for i, rect in enumerate(__snake_case ):
rect.set_stroke(__snake_case )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase : Dict = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__snake_case , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__snake_case )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__snake_case , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__snake_case , buff=0.0 )
self.add(__snake_case )
cpu_targs.append(__snake_case )
# Loaded checkpoint: six more cells above the model
UpperCAmelCase : int = [mem.copy() for i in range(6 )]
UpperCAmelCase : int = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
UpperCAmelCase : Any = Text('''Loaded Checkpoint''' , font_size=24 )
UpperCAmelCase : Union[str, Any] = Group(__snake_case , __snake_case ).arrange(__snake_case , aligned_edge=__snake_case , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
# Legend (key) explaining the color coding
UpperCAmelCase : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase : str = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case , __snake_case )
UpperCAmelCase : Tuple = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
# Step caption shown while the animation plays
UpperCAmelCase : List[Any] = MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ) , Write(__snake_case ) )
self.play(Write(__snake_case , run_time=1 ) , Create(__snake_case , run_time=1 ) )
# Phase 1: grow a filled copy over each checkpoint cell;
# Phase 2: move those copies into the CPU columns.
UpperCAmelCase : Tuple = []
UpperCAmelCase : int = []
for i, rect in enumerate(__snake_case ):
UpperCAmelCase : Any = fill.copy().set_fill(__snake_case , opacity=0.7 )
target.move_to(__snake_case )
first_animations.append(GrowFromCenter(__snake_case , run_time=1 ) )
UpperCAmelCase : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__snake_case , run_time=1.5 ) )
self.play(*__snake_case )
self.play(*__snake_case )
self.wait()
| 23 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__A : int = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    """Deprecated alias for :class:`FlavaImageProcessor`.

    Kept only for backward compatibility: constructing it warns and then
    behaves exactly like the image processor it subclasses.
    """

    def __init__(self, *args, **kwargs):
        # The mangled original passed the varargs tuple as the warning
        # category (a TypeError at runtime); FutureWarning is the
        # conventional category for Transformers deprecation notices.
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 57 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    """Boyer-Moore substring search using the bad-character heuristic.

    The mangled original bound ``text``/``pattern`` to throwaway locals in
    ``__init__`` and gave all three methods one colliding name, so the
    module-level call to ``bad_character_heuristic`` crashed.
    """

    def __init__(self, text: str, pattern: str):
        # Store the operands on the instance; every method reads them.
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of *char* in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch for the pattern
        aligned at *current_pos*, or -1 if the whole pattern matches."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list:
        """Return every start index at which the pattern occurs in the text.

        Note: like the reference implementation, every alignment is probed;
        the bad-character shift is not used to skip positions.
        """
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
        return positions
# Demo: search a small pattern in a small text and report the match positions.
# The mangled original bound everything to one throwaway name and then read
# undefined names (`text`, `pattern`, `positions`), raising NameError.
text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
| 57 | 1 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__a: Dict = logging.get_logger(__name__)
__a: Dict = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    """Configuration class for UMT5 models (defaults match google/umt5-small)."""

    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        # The mangled original bound all of these to throwaway locals, so
        # the config object never actually stored its hyperparameters.
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # Derive the dense activation from e.g. "gated-gelu" -> gated + gelu.
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # Historical special case: "gated-gelu" actually means gelu_new.
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for UMT5 (mirrors the T5 ONNX config)."""

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The mangled original built these dict entries into throwaway
        # locals instead of `common_inputs`, so the mapping was incomplete.
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
| 198 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    """Build a ``VideoMAEConfig`` for *model_name*, including label maps for
    fine-tuned checkpoints.

    The mangled original bound every config attribute to a throwaway local
    and used the function argument instead of the dict key in the
    id2label comprehension.
    """
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        # Pre-training checkpoints do not use mean pooling of the encoder
        # output (reconstructed from the upstream conversion script).
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    """Set encoder and decoder sizes on *config* based on *model_name*.

    "base" keeps the config defaults; anything else must name one of the
    known architecture sizes. The mangled original assigned every size to a
    throwaway local, leaving the config untouched.
    """
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    """Map one original VideoMAE parameter name to its HF equivalent.

    The mangled original assigned each ``replace`` result to a throwaway
    local and returned the input unchanged; every substitution below now
    rebinds *name* so the transformations compose in order.
    """
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        # Reached for bias parameters (e.g. q_bias / v_bias).
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
def convert_state_dict(orig_state_dict, config):
    """Remap an original VideoMAE state dict to HF parameter names, in place.

    Fused qkv projections are split into separate query/key/value tensors;
    every other entry is re-keyed via ``rename_key``. The mangled original
    discarded all rewritten values into throwaway locals, so the returned
    dict lost every popped entry.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    # Fused qkv weight is stacked [q; k; v] along dim 0.
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    """Download the spaghetti test video from the Hub and return it as a
    list of frames (one numpy array per frame)."""
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset")
    video = np.load(filepath)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    """Convert an original VideoMAE checkpoint (hosted on Google Drive) to
    the HF format, verify its outputs against hard-coded expected logits,
    and optionally save / push the converted model.

    The mangled original bound every intermediate (model, state dict,
    logits, expected slices, ...) to a throwaway local, so nothing
    downstream of each assignment could run.
    """
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        # Pre-training checkpoints additionally need a boolean mask.
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    # CLI wrapper: the mangled original never bound `parser`/`args`
    # (NameError) and left the guard body unindented.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import numpy as np
class Cell:
    """A grid node: a position plus the A* bookkeeping fields g, h, f and
    the parent link used to reconstruct the path.

    The mangled original assigned all fields to throwaway locals, so
    instances had no attributes at all.
    """

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        # Two cells are considered equal iff they occupy the same position.
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class Gridworld:
    """A rectangular grid stored as a numpy array (0 = free cell)."""

    def __init__(self, world_size=(5, 5)):
        # The mangled original bound these to throwaway locals, so the
        # bounds checks in get_neigbours crashed on missing attributes.
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        """Print the raw grid array."""
        print(self.w)

    def get_neigbours(self, cell):
        """Return the in-bounds 8-neighbourhood of *cell* as fresh Cells
        whose parent link points back to *cell*."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """A*-style search on *world* from *start* to *goal*.

    Returns the path as a list of (x, y) positions from start to goal.
    The mangled original lost the neighbour g/h/f updates and the
    coordinate unpacking, so every node compared with f == 0.
    """
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            # NOTE(review): these membership loops mirror the reference
            # implementation but do not actually skip visited nodes
            # (`continue` only affects the inner loop).
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            xa, ya = n.position
            xb, yb = goal.position
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)

    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    # Demo run: the mangled original bound everything to one throwaway
    # name and then read undefined names (`world`, `start`, `goal`, `s`).
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 198 |
from __future__ import annotations
from random import random
class Node:
    """A treap node: a BST key (`value`) plus a random heap priority."""

    def __init__(self, value=None):
        # The mangled original assigned all fields to throwaway locals,
        # so __repr__/__str__ and the treap operations crashed.
        self.value = value
        self.prior = random()
        self.left = None
        self.right = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self):
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into (nodes with value <= *value*, nodes > *value*).

    The mangled original discarded the recursive results instead of
    re-linking ``root.left`` / ``root.right``.
    """
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # root (and its right subtree) belongs to the right part.
            left, root.left = split(root.left, value)
            return left, root
        else:
            # root (and its left subtree) belongs to the left part.
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps where every key in *left* precedes every key in
    *right*, keeping the min-priority node on top.

    The mangled original dropped the recursive results instead of
    re-linking the winning node's child.
    """
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root: Node | None, value: int) -> Node | None:
    """Insert *value* into the treap and return the new root."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root: Node | None, value: int) -> Node | None:
    """Remove every node whose key equals *value* and return the new root."""
    # Carve out the middle part holding exactly the keys == value, drop it.
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
    """Print the treap's keys in sorted order, comma-separated."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)
def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply whitespace-separated commands like "+5" (insert 5) or
    "-3" (erase 3) to the treap and return the new root.

    The mangled original discarded the insert/erase results, so the tree
    never changed.
    """
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main() -> None:
    """Interactive loop: read commands from stdin and show the treap."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. ")
    # The mangled original never rebound `args`/`root`, so the loop could
    # neither terminate nor mutate the tree.
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")
if __name__ == "__main__":
    # Run doctests first, then start the interactive shell. The mangled
    # original left this body unindented (IndentationError).
    import doctest

    doctest.testmod()
    main()
| 198 | 1 |
def harmonic_series(n_term: str) -> list:
    """Return the first *n_term* terms of the harmonic series as strings.

    The mangled original named its parameter differently from the name the
    body read, raising NameError on every call.

    >>> harmonic_series("3")
    ['1', '1/2', '1/3']
    >>> harmonic_series("")
    []
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        # First term is "1", every later term is "1/(k+1)".
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
    # The mangled original bound the input to a throwaway name and then
    # read the undefined `nth_term`; guard body was also unindented.
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
| 340 |
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at *start*, marking nodes in
    the module-level ``visited`` dict and recording nodes whose subtree has
    even size in ``cuts``.

    The mangled original never updated ``visited`` and recursed on *start*
    instead of the neighbour, causing unbounded recursion.
    """
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret
def even_tree() -> None:
    """Run the subtree-size DFS from the root (node 1) to populate ``cuts``."""
    dfs(1)
if __name__ == "__main__":
    # Build the example tree and count how many edges can be cut so every
    # remaining component has an even number of nodes. The mangled original
    # bound everything to one throwaway name, so `tree`, `cuts` and
    # `edges` were undefined when used.
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
| 340 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for ShapEPipeline built from tiny dummy components.

    The mangled original collapsed every method/property onto one name
    (so later definitions clobbered earlier ones and the internal
    ``self.dummy_*`` / ``self.get_dummy_*`` references were broken) and
    discarded all locals; names are restored from the in-class references.
    """

    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-bound generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the released openai/shap-e weights.

    The mangled original renamed both methods to one colliding name, so
    neither the tearDown cleanup nor the test was discoverable by unittest.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy")
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 369 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Declare the lazily-importable structure of the roformer sub-package.
# Keys are submodule names, values the public names each submodule exports.
# The mangled original bound every update to a throwaway name, so the
# `_import_structure` dict consumed by _LazyModule at the bottom of the
# file was never defined.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

# Fast tokenizer requires the optional `tokenizers` dependency.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

# Flax models.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the lazy
    # module in the else-branch resolves these on first attribute access.
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module in sys.modules with a lazy proxy so heavy
    # framework-specific submodules are imported only on first access.
    # The mangled original bound the proxy to a throwaway name, leaving
    # the package non-lazy and its attributes unresolvable.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 202 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
# Emit INFO-level logs during conversion.
logging.set_verbosity_info()
# Module-level logger (the variable name was mangled by an automated rename;
# nothing below in this chunk reads it).
__lowerCamelCase = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """Build a wav2vec2 sequence-classification model and load the s3prl head.

    Args:
        base_model_name: hub name or path of the pretrained wav2vec2 backbone.
        hf_config: target ``WavaVecaConfig`` describing the classifier.
        downstream_dict: state dict of the s3prl downstream head.

    Returns:
        The model with projector/classifier weights copied from s3prl.
    """
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    # Copy the s3prl head tensors into the transformers modules.
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Build a wav2vec2 audio-frame-classification (diarization) model and
    load the s3prl linear head into its classifier.

    Args:
        base_model_name: hub name or path of the pretrained wav2vec2 backbone.
        hf_config: target ``WavaVecaConfig``.
        downstream_dict: state dict of the s3prl downstream head.

    Returns:
        The model with classifier weights copied from s3prl.
    """
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Build a wav2vec2 x-vector (speaker verification) model and load the
    s3prl TDNN/linear head weights.

    Args:
        base_model_name: hub name or path of the pretrained wav2vec2 backbone.
        hf_config: target ``WavaVecaConfig`` (``tdnn_kernel`` sizes are read
            from it to iterate the TDNN layers).
        downstream_dict: state dict of the s3prl downstream head.

    Returns:
        The model with projector/TDNN/classifier/objective weights copied.
    """
    model = WavaVecaForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    # One s3prl frame-level extractor module per TDNN layer.
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an s3prl wav2vec2 downstream checkpoint to a transformers model.

    Dispatches on ``hf_config.architectures[0]`` to the matching head
    converter, then saves the feature extractor and model to
    ``model_dump_path``.

    Raises:
        NotImplementedError: if the configured architecture has no converter.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = WavaVecaConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


# Backward-compatible binding for the obfuscated original name.
UpperCAmelCase = convert_saprl_checkpoint
if __name__ == "__main__":
    # CLI entry point: parse paths and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 221 | """simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    """Builds tiny DPR configs/inputs and shape-checks the three TF DPR heads.

    Used by the common-test harness below (``self.model_tester``); ``parent``
    is the owning TestCase, whose assert helpers are used for checks.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        """Create a tiny DPRConfig plus random ids/masks/labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        # DPR wraps a BERT encoder; build the DPR config from a BERT config.
        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Runs the shared TF model test-suite against the three DPR heads."""

    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    # Features not supported/needed by DPR in the common test-suite.
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # Only the first checkpoint of each archive list is loaded to keep the
        # slow test bounded.
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    """Integration check against a real pretrained DPR question encoder."""

    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 221 | 1 |
def dodecahedron_surface_area(edge: float) -> float:
    """Return the surface area of a regular dodecahedron with the given edge.

    Uses the closed form ``3 * sqrt(25 + 10*sqrt(5)) * edge**2``.

    Raises:
        ValueError: if ``edge`` is not a positive real number.

    >>> dodecahedron_surface_area(5)
    516.1432201766901
    """
    # Check the type first: comparing a non-number with 0 would raise a
    # confusing TypeError instead of the documented ValueError.  (The original
    # called isinstance(edge, edge), which raised TypeError for every input.)
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume(edge: float) -> float:
    """Return the volume of a regular dodecahedron with the given edge.

    Uses the closed form ``((15 + 7*sqrt(5)) / 4) * edge**3``.

    Raises:
        ValueError: if ``edge`` is not a positive real number.

    >>> dodecahedron_volume(5)
    957.8898700780791
    """
    # Type check before the comparison so non-numeric input raises the
    # documented ValueError (the original isinstance(edge, edge) always broke).
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


# Backward-compatible binding for the obfuscated original name (both functions
# in the original file were named ``_A``; this second definition won).
_A = dodecahedron_volume
if __name__ == "__main__":
    # Run the docstring examples as tests when executed as a script.
    import doctest

    doctest.testmod()
| 367 |
# Digits grouped by parity.  Every digit-pair sum of a reversible number must
# be odd, so once one digit of a pair is fixed the partner must come from the
# opposite-parity group.
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count ``length``-digit reversible numbers consistent with the digits
    chosen so far (Project Euler 145).

    A number n is reversible when every digit of n + reverse(n) is odd.
    Digit pairs are chosen from the outside in; ``remainder`` carries the
    running column sum, ``remaining_length`` counts the still-unfixed digits.

    Args:
        remaining_length: number of digit positions not yet assigned.
        remainder: carry accumulated from the pairs fixed so far.
        digits: scratch buffer of length ``length`` holding chosen digits.
        length: total number of digits of the candidate numbers.

    Returns:
        The count of valid completions.
    """
    if remaining_length == 0:
        # All pairs fixed: reject leading/trailing zero, then verify every
        # column sum (propagating carries inward) is odd.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Odd length: the middle digit is added to itself (always even), so
        # the incoming carry must already be odd.
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        # The pair sum plus carry must be odd, so the partner digit's parity
        # is forced by digit1's parity.
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result
def solution(max_power: int = 9) -> int:
    """Return how many reversible numbers exist below 10**max_power.

    Iterates every digit length up to ``max_power`` and sums the counts from
    ``reversible_numbers``.
    """
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


# Backward-compatible binding for the obfuscated original name.
_A = solution
if __name__ == "__main__":
    # Print the count of reversible numbers (Project Euler problem 145).
    print(F"""{solution() = }""")
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    """Builds tiny BigBird configs and random inputs for the Flax test-suite."""

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        """Create a tiny BigBirdConfig plus random ids and masks."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model test-suite specialized for BigBird."""

    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        # BigBird's block-sparse attention returns no attention probabilities,
        # so the common attention test only runs when explicitly enabled.
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 299 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    """Invert every pixel of a 3-channel image in place and return it.

    Each channel value v becomes 255 - v, producing the photographic negative.
    The input array is mutated and also returned.

    Args:
        img: image array of shape (rows, cols, 3) as returned by ``imread``.

    Returns:
        The same array with all pixel values inverted.
    """
    # getting number of pixels in the image
    row, col = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(row):
        for j in range(col):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


# Backward-compatible binding for the obfuscated original name.
A__ = convert_to_negative
if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
| 299 | 1 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    """Exercises TatoebaConverter model conversion and model-card writing.

    Skipped entirely unless the Tatoeba checkout exists at DEFAULT_REPO.
    """

    @cached_property
    def resolver(self):
        # One converter per test run, writing into a throwaway directory.
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    """Holds default image-processor settings and builds the kwargs dict used
    by the image-processing test-suite below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        # Fall back to the processor's documented defaults when not supplied.
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Runs the shared image-processing tests against MobileNetV1's processor
    with PIL, NumPy and PyTorch inputs."""

    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.