| code (string, lengths 86–54.5k) | code_codestyle (int64, 0–371) | style_context (string, lengths 87–49.2k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : str = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = "realm"

    def __init__( self , vocab_size=30_522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3_072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1E-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=13_353_718 , searcher_beam_size=5_000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size | 282 |
import numpy as np
def sigmoid( vector : np.ndarray ) -> np.ndarray:
    # Elementwise logistic sigmoid: 1 / (1 + exp(-x)).
    return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 1 |
from __future__ import annotations
def ceil_index( v , l , r , key ):  # noqa: E741
    # Binary search: smallest index in v[l..r] whose value is >= key.
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length( v ):
    # O(n log n) LIS: tail[i] holds the smallest possible tail value of an
    # increasing subsequence of length i + 1 seen so far.
    if len(v ) == 0:
        return 0
    tail = [0] * len(v )
    length = 1
    tail[0] = v[0]
    for i in range(1 , len(v ) ):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail , -1 , length - 1 , v[i] )] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests( unittest.TestCase ):
    @property
    def gpu_provider( self ):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_stable_diffusion_inpaint_legacy( self ):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo.png""" )
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """A red cat sitting on a park bench"""
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=generator , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1e-2
| 343 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Dict = logging.get_logger(__name__)
A__: int = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig( PretrainedConfig ):
    model_type = "gpt_neox"

    def __init__( self , vocab_size=5_0_4_3_2 , hidden_size=6_1_4_4 , num_hidden_layers=4_4 , num_attention_heads=6_4 , intermediate_size=2_4_5_7_6 , hidden_act="gelu" , rotary_pct=0.25 , rotary_emb_base=1_0_0_0_0 , attention_dropout=0.0 , hidden_dropout=0.0 , classifier_dropout=0.1 , max_position_embeddings=2_0_4_8 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=0 , eos_token_id=2 , tie_word_embeddings=False , use_parallel_residual=True , rope_scaling=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                """The hidden size is not divisible by the number of attention heads! Make sure to update them!""" )

    def _rope_scaling_validation( self ):
        '''simple docstring'''
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get("""type""" , None )
        rope_scaling_factor = self.rope_scaling.get("""factor""" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
| 276 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
a_ = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments( BenchmarkArguments ):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__( self , **kwargs ):
        # Map deprecated negative flags (e.g. `no_cuda`) onto their positive
        # counterparts (e.g. `cuda`) before the dataclass __init__ runs.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self , positive_arg , not kwargs.pop(deprecated_arg ) )
                logger.warning(
                    f"""{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"""
                    f""" {positive_arg}={kwargs[positive_arg]}""" )
        self.torchscript = kwargs.pop('''torchscript''' , self.torchscript )
        self.torch_xla_tpu_print_metrics = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
        self.fp16_opt_level = kwargs.pop('''fp16_opt_level''' , self.fp16_opt_level )
        super().__init__(**kwargs )

    torchscript: bool = field(default=False , metadata={"help": "Trace the models using torchscript"} )
    torch_xla_tpu_print_metrics: bool = field(default=False , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
    fp16_opt_level: str = field(
        default="O1" , metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        } , )

    @cached_property
    def _setup_devices( self ) -> Tuple["torch.device", int]:
        requires_backends(self , ['''torch'''] )
        logger.info('''PyTorch: setting up devices''' )
        if not self.cuda:
            device = torch.device('''cpu''' )
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu( self ) -> bool:
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx( self ) -> int:
        requires_backends(self , ['''torch'''] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device( self ) -> "torch.device":
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[0]

    @property
    def n_gpu( self ) -> int:
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[1]

    @property
    def is_gpu( self ) -> bool:
        return self.n_gpu > 0
| 249 | 0 |
"""simple docstring"""
import qiskit
def single_qubit_measure( qubits: int , classical_bits: int ) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1_0_0_0 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(F'''Total count for various states are: {counts}''')
| 366 | """simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__:Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:List[Any] = {"""vocab_file""": """vocab.txt"""}
SCREAMING_SNAKE_CASE__:Optional[int] = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
SCREAMING_SNAKE_CASE__:Tuple = {
"""openbmb/cpm-ant-10b""": 1024,
}
def load_vocab(vocab_file ):
    # Map each vocabulary line to its index, preserving file order.
    vocab = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip("\n" )
        vocab[token] = index
    return vocab
class WordpieceTokenizer:
    def __init__( self , vocab , unk_token="<unk>" , max_input_chars_per_word=200 ):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize( self , token ):
        # Greedy longest-match-first segmentation against the vocab.
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
class CpmAntTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__( self , vocab_file , bod_token="<d>" , eod_token="</d>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , line_token="</n>" , space_token="</_>" , padding_side="left" , **kwargs , ):
        requires_backends(self , ["jieba"] )
        super().__init__(
            bod_token=bod_token , eod_token=eod_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , unk_token=unk_token , line_token=line_token , space_token=space_token , padding_side=padding_side , **kwargs , )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file )
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x: x[1] ) )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )

    @property
    def bod_token_id( self ):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id( self ):
        return self.encoder[self.eod_token]

    @property
    def newline_id( self ):
        return self.encoder["\n"]

    @property
    def vocab_size( self ):
        return len(self.encoder )

    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )

    def _tokenize( self , text ):
        output_tokens = []
        for x in jieba.cut(text , cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
        return output_tokens

    def _decode( self , token_ids , **kwargs ):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids , **kwargs )

    def check( self , token ):
        return token in self.encoder

    def convert_tokens_to_string( self , tokens ):
        return "".join(tokens )

    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self , index ):
        return self.decoder.get(index , self.unk_token )

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x: x[1] ) )
        with open(vocab_file , "w" , encoding="utf-8" ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!" )
                    index = token_index
                writer.write(token + "\n" )
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
        return [1] + ([0] * len(token_ids_0 ))
| 268 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
A__: List[str] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor( BaseImageProcessor ):
    model_input_names = ["pixel_values"]

    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BICUBIC , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 2_5_5 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , do_convert_rgb: bool = True , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"""shortest_edge""": 2_2_4}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name="""crop_size""" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        output_size = get_resize_output_image_size(image , size=size["""shortest_edge"""] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )

    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: int = None , do_rescale: bool = None , rescale_factor: float = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , do_convert_rgb: bool = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name="""size""" , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 276 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict( TypedDict ):
    bwt_string: str
    idx_original_string: int
def all_rotations( s: str ) -> list[str]:
    if not isinstance(s , str ):
        raise TypeError("""The parameter s type must be str.""" )
    return [s[i:] + s[:i] for i in range(len(s ) )]


def bwt_transform( s: str ) -> BWTTransformDict:
    if not isinstance(s , str ):
        raise TypeError("""The parameter s type must be str.""" )
    if not s:
        raise ValueError("""The parameter s must not be empty.""" )
    rotations = all_rotations(s )
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
    return response


def reverse_bwt( bwt_string: str , idx_original_string: int ) -> str:
    if not isinstance(bwt_string , str ):
        raise TypeError("""The parameter bwt_string type must be str.""" )
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""" )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or castable"""
            """ to int.""" )
    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
    ordered_rotations = [""""""] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = '''Provide a string that I will generate its BWT transform: '''
    s = input(entry_msg ).strip()
    result = bwt_transform(s )
print(
F"Burrows Wheeler transform for string '{s}' results "
F"in '{result['bwt_string']}'"
)
    original_string = reverse_bwt(result['''bwt_string'''], result['''idx_original_string'''])
print(
F"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
F"we get original string '{original_string}'"
)
| 276 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__A = logging.get_logger(__name__)
# General docstring
__A = "RegNetConfig"
# Base docstring
__A = "facebook/regnet-y-040"
__A = [1, 1088, 7, 7]
# Image classification docstring
__A = "facebook/regnet-y-040"
__A = "tabby, tabby cat"
__A = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer( tf.keras.layers.Layer ):
    def __init__(self , out_channels: int , kernel_size: int = 3 , stride: int = 1 , groups: int = 1 , activation: Optional[str] = "relu" , **kwargs , ) -> None:
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels , kernel_size=kernel_size , strides=stride , padding="VALID" , groups=groups , use_bias=False , name="convolution" , )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self , hidden_state ):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings( tf.keras.layers.Layer ):
    def __init__(self , config: RegNetConfig , **kwargs ) -> None:
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )

    def call(self , pixel_values ):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values , perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut( tf.keras.layers.Layer ):
    def __init__(self , out_channels: int , stride: int = 2 , **kwargs ) -> None:
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels , kernel_size=1 , strides=stride , use_bias=False , name="convolution")
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization")

    def call(self , inputs: tf.Tensor , training: bool = False ) -> tf.Tensor:
        return self.normalization(self.convolution(inputs) , training=training)


class TFRegNetSELayer( tf.keras.layers.Layer ):
    def __init__(self , in_channels: int , reduced_channels: int , **kwargs ) -> None:
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True , name="pooler")
        # Squeeze-and-excitation: two 1x1 convs (ReLU then sigmoid) turn the pooled
        # descriptor into per-channel gates that rescale the input feature map.
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels , kernel_size=1 , activation="relu" , name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels , kernel_size=1 , activation="sigmoid" , name="attention.2"),
        ]

    def call(self , hidden_state ):
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class _SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self : Tuple , UpperCAmelCase_ : RegNetConfig , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : Union[str, Any]) ->Any:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
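        # A projection shortcut is only needed when the residual branch changes shape
        # (stride != 1 or in_channels != out_channels); otherwise the identity is used.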
lowerCamelCase__: Optional[Any] =in_channels != out_channels or stride != 1
lowerCamelCase__: List[str] =max(1 , out_channels // config.groups_width)
lowerCamelCase__: List[Any] =(
TFRegNetShortCut(UpperCAmelCase_ , stride=UpperCAmelCase_ , name="shortcut")
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut")
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowerCamelCase__: str =[
TFRegNetConvLayer(UpperCAmelCase_ , kernel_size=1 , activation=config.hidden_act , name="layer.0"),
TFRegNetConvLayer(
UpperCAmelCase_ , stride=UpperCAmelCase_ , groups=UpperCAmelCase_ , activation=config.hidden_act , name="layer.1"),
TFRegNetConvLayer(UpperCAmelCase_ , kernel_size=1 , activation=UpperCAmelCase_ , name="layer.2"),
]
        self.activation = ACT2FN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : Optional[Any]) ->str:
'''simple docstring'''
lowerCamelCase__: List[Any] =hidden_state
for layer_module in self.layers:
lowerCamelCase__: Union[str, Any] =layer_module(UpperCAmelCase_)
lowerCamelCase__: str =self.shortcut(UpperCAmelCase_)
hidden_state += residual
lowerCamelCase__: Optional[int] =self.activation(UpperCAmelCase_)
return hidden_state
class _SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self : str , UpperCAmelCase_ : RegNetConfig , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : List[Any]) ->Tuple:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =in_channels != out_channels or stride != 1
lowerCamelCase__: Tuple =max(1 , out_channels // config.groups_width)
lowerCamelCase__: List[Any] =(
TFRegNetShortCut(UpperCAmelCase_ , stride=UpperCAmelCase_ , name="shortcut")
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut")
)
lowerCamelCase__: List[Any] =[
TFRegNetConvLayer(UpperCAmelCase_ , kernel_size=1 , activation=config.hidden_act , name="layer.0"),
TFRegNetConvLayer(
UpperCAmelCase_ , stride=UpperCAmelCase_ , groups=UpperCAmelCase_ , activation=config.hidden_act , name="layer.1"),
TFRegNetSELayer(UpperCAmelCase_ , reduced_channels=int(round(in_channels / 4)) , name="layer.2"),
TFRegNetConvLayer(UpperCAmelCase_ , kernel_size=1 , activation=UpperCAmelCase_ , name="layer.3"),
]
        self.activation = ACT2FN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Tuple) ->str:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =hidden_state
for layer_module in self.layers:
lowerCamelCase__: int =layer_module(UpperCAmelCase_)
lowerCamelCase__: List[str] =self.shortcut(UpperCAmelCase_)
hidden_state += residual
lowerCamelCase__: Any =self.activation(UpperCAmelCase_)
return hidden_state
class _SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self : Tuple , UpperCAmelCase_ : RegNetConfig , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 2 , **UpperCAmelCase_ : int) ->Optional[int]:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
lowerCamelCase__: Dict =[
# downsampling is done in the first layer with stride of 2
layer(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , stride=UpperCAmelCase_ , name="layers.0"),
*[layer(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , name=F"""layers.{i+1}""") for i in range(depth - 1)],
]
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : List[str]) ->int:
'''simple docstring'''
for layer_module in self.layers:
lowerCamelCase__: Any =layer_module(UpperCAmelCase_)
return hidden_state
class _SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self : List[str] , UpperCAmelCase_ : RegNetConfig , **UpperCAmelCase_ : List[Any]) ->int:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
lowerCamelCase__: List[Any] =[]
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
UpperCAmelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ))
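        # Pair consecutive hidden sizes so each later stage maps in_channels -> out_channels.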
lowerCamelCase__: Tuple =zip(config.hidden_sizes , config.hidden_sizes[1:])
for i, ((in_channels, out_channels), depth) in enumerate(zip(UpperCAmelCase_ , config.depths[1:])):
self.stages.append(TFRegNetStage(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , depth=UpperCAmelCase_ , name=F"""stages.{i+1}"""))
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : tf.Tensor , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = True) ->TFBaseModelOutputWithNoAttention:
'''simple docstring'''
lowerCamelCase__: List[str] =() if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCamelCase__: Optional[Any] =hidden_states + (hidden_state,)
lowerCamelCase__: Dict =stage_module(UpperCAmelCase_)
if output_hidden_states:
lowerCamelCase__: Tuple =hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return TFBaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase_ , hidden_states=UpperCAmelCase_)
@keras_serializable
class _SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
'''simple docstring'''
lowercase_ = RegNetConfig
def __init__(self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str]) ->List[Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
lowerCamelCase__: int =config
lowerCamelCase__: Optional[int] =TFRegNetEmbeddings(UpperCAmelCase_ , name="embedder")
lowerCamelCase__: Dict =TFRegNetEncoder(UpperCAmelCase_ , name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True , name="pooler")
@unpack_inputs
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : tf.Tensor , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : bool = False , ) ->TFBaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
lowerCamelCase__: Any =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase__: List[str] =return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase__: Optional[Any] =self.embedder(UpperCAmelCase_ , training=UpperCAmelCase_)
lowerCamelCase__: Tuple =self.encoder(
UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , training=UpperCAmelCase_)
lowerCamelCase__: str =encoder_outputs[0]
lowerCamelCase__: Optional[Any] =self.pooler(UpperCAmelCase_)
# Change to NCHW output format have uniformity in the modules
lowerCamelCase__: Any =tf.transpose(UpperCAmelCase_ , perm=(0, 3, 1, 2))
lowerCamelCase__: Optional[int] =tf.transpose(UpperCAmelCase_ , perm=(0, 3, 1, 2))
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowerCamelCase__: List[str] =tuple([tf.transpose(UpperCAmelCase_ , perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase_ , pooler_output=UpperCAmelCase_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = RegNetConfig
lowercase_ = "regnet"
lowercase_ = "pixel_values"
@property
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->int:
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa)}
__A = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
__A = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , __SCREAMING_SNAKE_CASE , )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Optional[Any] , UpperCAmelCase_ : RegNetConfig , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[str]) ->Optional[int]:
'''simple docstring'''
super().__init__(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =TFRegNetMainLayer(UpperCAmelCase_ , name="regnet")
@unpack_inputs
@add_start_docstrings_to_model_forward(UpperCAmelCase_)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : tf.Tensor , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Dict=False , ) ->Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
'''simple docstring'''
lowerCamelCase__: Dict =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase__: Optional[int] =return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase__: Any =self.regnet(
pixel_values=UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , training=UpperCAmelCase_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , __SCREAMING_SNAKE_CASE , )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Any , UpperCAmelCase_ : RegNetConfig , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int]) ->List[str]:
'''simple docstring'''
super().__init__(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: str =config.num_labels
lowerCamelCase__: List[str] =TFRegNetMainLayer(UpperCAmelCase_ , name="regnet")
# classification head
lowerCamelCase__: Any =[
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1") if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(UpperCAmelCase_)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : tf.Tensor = None , UpperCAmelCase_ : tf.Tensor = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : List[str]=False , ) ->Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase__: str =return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase__: Any =self.regnet(
UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , training=UpperCAmelCase_)
lowerCamelCase__: List[str] =outputs.pooler_output if return_dict else outputs[1]
lowerCamelCase__: Any =self.classifier[0](UpperCAmelCase_)
lowerCamelCase__: str =self.classifier[1](UpperCAmelCase_)
lowerCamelCase__: int =None if labels is None else self.hf_compute_loss(labels=UpperCAmelCase_ , logits=UpperCAmelCase_)
if not return_dict:
lowerCamelCase__: Any =(logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=UpperCAmelCase_ , logits=UpperCAmelCase_ , hidden_states=outputs.hidden_states)
| 364 |
def decimal_to_binary( num ) -> str:
    """simple docstring"""
    if isinstance(num , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        # Collect bits most-significant first by prepending num % 2.
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 273 | 0 |
'''simple docstring'''
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution( n : str = N ) -> int:
    """simple docstring"""
    largest_product = -sys.maxsize - 1
    for i in range(len(n ) - 12 ):
        product = 1
        for j in range(13 ):
            product *= int(n[i + j] )
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 75 | '''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__a = logging.get_logger(__name__)
class OwlViTFeatureExtractor( OwlViTImageProcessor ):
    """simple docstring"""

    def __init__( self , *args , **kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs ) | 145 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
        GPT2Config,
        T5Config,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
        GPT2LMHeadModel,
RobertaForMaskedLM,
        T5ForConditionalGeneration,
)
@is_pt_tf_cross_test
class UpperCamelCase_ (unittest.TestCase ):
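    # Each test below round-trips a checkpoint across frameworks: a TF model is loaded
    # from PyTorch weights (from_pt=True), then a PyTorch model from TF weights (from_tf=True).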
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = TFAutoModel.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = AutoModel.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ : Dict = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = AutoModelForPreTraining.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[Any] = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : str = TFAutoModelForCausalLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = AutoModelForCausalLM.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[Any] = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Dict = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = AutoModelForMaskedLM.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = AutoModelForMaskedLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : int = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
            UpperCAmelCase_ : Optional[int] = TFAutoModelForSeq2SeqLM.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
            UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = TFAutoModelForSeq2SeqLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
            UpperCAmelCase_ : Tuple = AutoModelForSeq2SeqLM.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
            UpperCAmelCase_ , UpperCAmelCase_ : List[str] = AutoModelForSeq2SeqLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ : Tuple = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : int = AutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
UpperCAmelCase_ : str = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) , 14_410 )
UpperCAmelCase_ : Optional[Any] = AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) , 14_410 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) , 14_410 )
UpperCAmelCase_ : str = AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) , 14_410 )
| 253 |
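# Illustrative sketch (not part of the row above): the from_pt/from_tf round trip the
# tests exercise — loading PyTorch weights into a TensorFlow auto-model and back.
# "bert-base-uncased" is a real checkpoint; this assumes transformers with both torch
# and tensorflow installed.
from transformers import AutoModelForMaskedLM, TFAutoModelForMaskedLM

tf_model = TFAutoModelForMaskedLM.from_pretrained("bert-base-uncased", from_pt=True)
pt_model = AutoModelForMaskedLM.from_pretrained("bert-base-uncased")
# output_loading_info=True additionally returns a dict of missing/unexpected keys
pt_model, loading_info = AutoModelForMaskedLM.from_pretrained(
    "bert-base-uncased", output_loading_info=True
)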
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''longformer'''
def __init__( self : List[str] , lowerCAmelCase_ : Union[List[int], int] = 512 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 30_522 , lowerCAmelCase_ : int = 768 , lowerCAmelCase_ : int = 12 , lowerCAmelCase_ : int = 12 , lowerCAmelCase_ : int = 3_072 , lowerCAmelCase_ : str = "gelu" , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : int = 512 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : float = 1e-12 , lowerCAmelCase_ : bool = False , **lowerCAmelCase_ : Optional[int] , ) -> Dict:
super().__init__(pad_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = attention_window
UpperCAmelCase_ : Dict = sep_token_id
UpperCAmelCase_ : Any = bos_token_id
UpperCAmelCase_ : Dict = eos_token_id
UpperCAmelCase_ : List[str] = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Union[str, Any] = intermediate_size
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : Any = attention_probs_dropout_prob
UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
UpperCAmelCase_ : List[str] = type_vocab_size
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : Optional[Any] = layer_norm_eps
UpperCAmelCase_ : Optional[Any] = onnx_export
class UpperCamelCase_ (__A ):
def __init__( self : List[Any] , lowerCAmelCase_ : "PretrainedConfig" , lowerCAmelCase_ : str = "default" , lowerCAmelCase_ : "List[PatchingSpec]" = None ) -> Union[str, Any]:
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : int = True
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase_ : Tuple = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ : Optional[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase_ : Dict = super().outputs
if self.task == "default":
UpperCAmelCase_ : List[str] = {0: "batch"}
return outputs
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> float:
return 1e-4
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : "PreTrainedTokenizerBase" , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
UpperCAmelCase_ : Tuple = super().generate_dummy_inputs(
preprocessor=lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
UpperCAmelCase_ : str = torch.zeros_like(inputs["input_ids"] )
# make every second token global
UpperCAmelCase_ : Union[str, Any] = 1
return inputs
| 253 | 1 |
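# Sketch of the global-attention trick used in generate_dummy_inputs above: Longformer
# expects a 0/1 global_attention_mask alongside input_ids, and the dummy inputs mark
# every second token as global. Shapes below are illustrative.
import torch

input_ids = torch.ones((2, 8), dtype=torch.long)
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1  # every second token attends globally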
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 |
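# Minimal sketch of the deferred-import idea behind _LazyModule, using PEP 562's
# module-level __getattr__. The attribute/submodule mapping is a made-up example,
# not Whisper's real import structure.
import importlib

_LAZY_ATTRS = {"HeavyClass": "._heavy_submodule"}  # hypothetical submodule

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")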
"""simple docstring"""
from __future__ import annotations
__A = tuple[int, int, int]
__A = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
__A = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
# -------------------------- default selection --------------------------
# rotors --------------------------
__A = """EGZWVONAHDCLFQMSIPJBYUKXTR"""
__A = """FOBHMDKEXQNRAULPGSJVTYICZW"""
__A = """ZJXESIUQLHAVRMDOYGTNFWPBKC"""
# reflector --------------------------
__A = {
"""A""": """N""",
"""N""": """A""",
"""B""": """O""",
"""O""": """B""",
"""C""": """P""",
"""P""": """C""",
"""D""": """Q""",
"""Q""": """D""",
"""E""": """R""",
"""R""": """E""",
"""F""": """S""",
"""S""": """F""",
"""G""": """T""",
"""T""": """G""",
"""H""": """U""",
"""U""": """H""",
"""I""": """V""",
"""V""": """I""",
"""J""": """W""",
"""W""": """J""",
"""K""": """X""",
"""X""": """K""",
"""L""": """Y""",
"""Y""": """L""",
"""M""": """Z""",
"""Z""": """M""",
}
# -------------------------- extra rotors --------------------------
__A = """RMDJXFUWGISLHVTCQNKYPBEZOA"""
__A = """SGLCPQWZHKXAREONTFBVIYJUDM"""
__A = """HVSICLTYKQUBXDWAJZOMFGPREN"""
__A = """RZWQHFMVDBKICJLNTUXAGYPSOE"""
__A = """LFKIJODBEGAMQPXVUHYSTCZRWN"""
__A = """KOAEGVDHXPQZMLFTYWJNBRCIUS"""
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
"""simple docstring"""
if (unique_rotsel := len(set(_SCREAMING_SNAKE_CASE ) )) < 3:
lowerCAmelCase__ :Union[str, Any] = F"Please use 3 unique rotors (not {unique_rotsel})"
raise Exception(_SCREAMING_SNAKE_CASE )
# Checks if rotor positions are valid
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :str = rotpos
if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Tuple = F"First rotor position is not within range of 1..26 ({rotorposa}"
raise ValueError(_SCREAMING_SNAKE_CASE )
if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Optional[Any] = F"Second rotor position is not within range of 1..26 ({rotorposa})"
raise ValueError(_SCREAMING_SNAKE_CASE )
if not 0 < rotorposa <= len(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Union[str, Any] = F"Third rotor position is not within range of 1..26 ({rotorposa})"
raise ValueError(_SCREAMING_SNAKE_CASE )
# Validates string and returns dict
lowerCAmelCase__ :int = _plugboard(_SCREAMING_SNAKE_CASE )
return rotpos, rotsel, pbdict
def __A (_SCREAMING_SNAKE_CASE ) ->dict[str, str]:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :str = F"Plugboard setting isn't type string ({type(_SCREAMING_SNAKE_CASE )})"
raise TypeError(_SCREAMING_SNAKE_CASE )
elif len(_SCREAMING_SNAKE_CASE ) % 2 != 0:
lowerCAmelCase__ :str = F"Odd number of symbols ({len(_SCREAMING_SNAKE_CASE )})"
raise Exception(_SCREAMING_SNAKE_CASE )
elif pbstring == "":
return {}
    pbstring = pbstring.replace(' ' , '' )
# Checks if all characters are unique
lowerCAmelCase__ :Any = set()
for i in pbstring:
if i not in abc:
lowerCAmelCase__ :Any = F"'{i}' not in list of symbols"
raise Exception(_SCREAMING_SNAKE_CASE )
elif i in tmppbl:
lowerCAmelCase__ :Dict = F"Duplicate symbol ({i})"
raise Exception(_SCREAMING_SNAKE_CASE )
else:
tmppbl.add(_SCREAMING_SNAKE_CASE )
del tmppbl
# Created the dictionary
lowerCAmelCase__ :List[Any] = {}
for j in range(0 , len(_SCREAMING_SNAKE_CASE ) - 1 , 2 ):
lowerCAmelCase__ :Optional[int] = pbstring[j + 1]
lowerCAmelCase__ :Union[str, Any] = pbstring[j]
return pb
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (rotora, rotora, rotora) , _SCREAMING_SNAKE_CASE = "" , ) ->str:
"""simple docstring"""
lowerCAmelCase__ :Tuple = text.upper()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = _validator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , plugb.upper() )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = rotor_position
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
lowerCAmelCase__ :Dict = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
lowerCAmelCase__ :Dict = plugboard[symbol]
# rotor ra --------------------------
lowerCAmelCase__ :Optional[int] = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa
lowerCAmelCase__ :str = rotora[index % len(_SCREAMING_SNAKE_CASE )]
# rotor rb --------------------------
lowerCAmelCase__ :Optional[int] = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa
lowerCAmelCase__ :int = rotora[index % len(_SCREAMING_SNAKE_CASE )]
# rotor rc --------------------------
lowerCAmelCase__ :str = abc.index(_SCREAMING_SNAKE_CASE ) + rotorposa
lowerCAmelCase__ :Optional[Any] = rotora[index % len(_SCREAMING_SNAKE_CASE )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
lowerCAmelCase__ :str = reflector[symbol]
# 2nd rotors
lowerCAmelCase__ :Tuple = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa]
lowerCAmelCase__ :Optional[int] = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa]
lowerCAmelCase__ :Any = abc[rotora.index(_SCREAMING_SNAKE_CASE ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
lowerCAmelCase__ :Union[str, Any] = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :str = 0
rotorposa += 1
if rotorposa >= len(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :List[Any] = 0
rotorposa += 1
if rotorposa >= len(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Optional[Any] = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(_SCREAMING_SNAKE_CASE )
return "".join(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__A = """This is my Python script that emulates the Enigma machine from WWII."""
__A = (1, 1, 1)
__A = """pictures"""
__A = (rotora, rotora, rotora)
__A = enigma(message, rotor_pos, rotor_sel, pb)
print("""Encrypted message:""", en)
print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
| 293 | 0 |
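# Standalone sketch of why a single Enigma pass both encrypts and decrypts: the
# reflector (like the plugboard) is a symmetric mapping, so feeding the ciphertext
# back through identical settings undoes every step. Tiny stand-in reflector below.
reflector = {"A": "N", "N": "A", "B": "O", "O": "B"}
assert all(reflector[reflector[c]] == c for c in reflector)  # an involution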
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = ["image_processor", "tokenizer"]
__UpperCAmelCase : Optional[int] = "AutoImageProcessor"
__UpperCAmelCase : int = "AutoTokenizer"
def __init__( self : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : str ) -> List[str]:
super().__init__(lowerCamelCase , lowerCamelCase )
__snake_case : Union[str, Any] = self.image_processor
def __call__( self : List[Any] , lowerCamelCase : Optional[int]=None , lowerCamelCase : int=None , lowerCamelCase : int=None , **lowerCamelCase : Optional[int] ) -> int:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase )
        if images is not None:
            image_features = self.image_processor(lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=lowerCamelCase )
def __snake_case ( self : int , *lowerCamelCase : int , **lowerCamelCase : Any ) -> List[Any]:
return self.tokenizer.batch_decode(*lowerCamelCase , **lowerCamelCase )
def __snake_case ( self : Union[str, Any] , *lowerCamelCase : Dict , **lowerCamelCase : List[str] ) -> Tuple:
return self.tokenizer.decode(*lowerCamelCase , **lowerCamelCase )
@property
def __snake_case ( self : List[str] ) -> Tuple:
return ["input_ids", "attention_mask", "pixel_values"]
| 134 |
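# Standalone sketch of the __call__ branching above (names are illustrative): text goes
# through the tokenizer, images through the image processor, and pixel_values are merged
# into the text encoding when both are given.
def combined_call(tokenizer, image_processor, text=None, images=None):
    if text is None and images is None:
        raise ValueError("You have to specify either text or images.")
    encoding = tokenizer(text) if text is not None else None
    if images is not None:
        image_features = image_processor(images)
        if encoding is None:
            return image_features
        encoding["pixel_values"] = image_features["pixel_values"]
    return encoding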
def greatest_common_divisor( x , y ):
    return x if y == 0 else greatest_common_divisor( y , x % y )
def lcm( x , y ):
    return (x * y) // greatest_common_divisor( x , y )
def solution( n = 2_0 ):
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm( g , i )
    return g
if __name__ == "__main__":
print(f'''{solution() = }''')
| 134 | 1 |
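# Usage check for the lcm-folding row above: solution(10) is the smallest number
# evenly divisible by 1..10 (Project Euler problem 5).
assert solution(10) == 2_520
assert solution(20) == 232_792_560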
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : torch.FloatTensor
class _snake_case ( lowercase_ , lowercase_ ):
@register_to_config
def __init__( self , a__ = 32 , a__ = 64 , a__ = 20 , a__ = 768 , a__=77 , a__=4 , a__ = 0.0 , a__ = "silu" , a__ = None , a__ = None , a__ = "linear" , a__ = "prd" , a__ = None , a__ = None , a__ = None , ) -> Tuple:
'''simple docstring'''
super().__init__()
snake_case_ = num_attention_heads
snake_case_ = attention_head_dim
snake_case_ = num_attention_heads * attention_head_dim
snake_case_ = additional_embeddings
snake_case_ = time_embed_dim or inner_dim
snake_case_ = embedding_proj_dim or embedding_dim
snake_case_ = clip_embed_dim or embedding_dim
snake_case_ = Timesteps(a__ , a__ , 0 )
snake_case_ = TimestepEmbedding(a__ , a__ , out_dim=a__ , act_fn=a__ )
snake_case_ = nn.Linear(a__ , a__ )
if embedding_proj_norm_type is None:
snake_case_ = None
elif embedding_proj_norm_type == "layer":
snake_case_ = nn.LayerNorm(a__ )
else:
raise ValueError(F'unsupported embedding_proj_norm_type: {embedding_proj_norm_type}' )
snake_case_ = nn.Linear(a__ , a__ )
if encoder_hid_proj_type is None:
snake_case_ = None
elif encoder_hid_proj_type == "linear":
snake_case_ = nn.Linear(a__ , a__ )
else:
raise ValueError(F'unsupported encoder_hid_proj_type: {encoder_hid_proj_type}' )
snake_case_ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , a__ ) )
if added_emb_type == "prd":
snake_case_ = nn.Parameter(torch.zeros(1 , 1 , a__ ) )
elif added_emb_type is None:
snake_case_ = None
else:
raise ValueError(
F'`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.' )
snake_case_ = nn.ModuleList(
[
BasicTransformerBlock(
a__ , a__ , a__ , dropout=a__ , activation_fn="gelu" , attention_bias=a__ , )
for d in range(a__ )
] )
if norm_in_type == "layer":
snake_case_ = nn.LayerNorm(a__ )
elif norm_in_type is None:
snake_case_ = None
else:
raise ValueError(F'Unsupported norm_in_type: {norm_in_type}.' )
snake_case_ = nn.LayerNorm(a__ )
snake_case_ = nn.Linear(a__ , a__ )
snake_case_ = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
snake_case_ = causal_attention_mask[None, ...]
self.register_buffer("causal_attention_mask" , a__ , persistent=a__ )
snake_case_ = nn.Parameter(torch.zeros(1 , a__ ) )
snake_case_ = nn.Parameter(torch.zeros(1 , a__ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCAmelCase__ ( self ) -> Dict[str, AttentionProcessor]:
'''simple docstring'''
snake_case_ = {}
def fn_recursive_add_processors(a__ , a__ , a__ ):
if hasattr(a__ , "set_processor" ):
snake_case_ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'{name}.{sub_name}' , a__ , a__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(a__ , a__ , a__ )
return processors
def lowerCAmelCase__ ( self , a__ ) -> List[Any]:
'''simple docstring'''
snake_case_ = len(self.attn_processors.keys() )
if isinstance(a__ , a__ ) and len(a__ ) != count:
raise ValueError(
F'A dict of processors was passed, but the number of processors {len(a__ )} does not match the'
F' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(a__ , a__ , a__ ):
if hasattr(a__ , "set_processor" ):
if not isinstance(a__ , a__ ):
module.set_processor(a__ )
else:
module.set_processor(processor.pop(F'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'{name}.{sub_name}' , a__ , a__ )
for name, module in self.named_children():
fn_recursive_attn_processor(a__ , a__ , a__ )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ = None , a__ = None , a__ = True , ) -> Dict:
'''simple docstring'''
snake_case_ = hidden_states.shape[0]
snake_case_ = timestep
if not torch.is_tensor(a__ ):
snake_case_ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(a__ ) and len(timesteps.shape ) == 0:
snake_case_ = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
snake_case_ = timesteps * torch.ones(a__ , dtype=timesteps.dtype , device=timesteps.device )
snake_case_ = self.time_proj(a__ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
snake_case_ = timesteps_projected.to(dtype=self.dtype )
snake_case_ = self.time_embedding(a__ )
if self.embedding_proj_norm is not None:
snake_case_ = self.embedding_proj_norm(a__ )
snake_case_ = self.embedding_proj(a__ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
snake_case_ = self.encoder_hidden_states_proj(a__ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
snake_case_ = self.proj_in(a__ )
snake_case_ = self.positional_embedding.to(hidden_states.dtype )
snake_case_ = []
snake_case_ = 0
if encoder_hidden_states is not None:
additional_embeds.append(a__ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
snake_case_ = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
snake_case_ = hidden_states[:, None, :]
snake_case_ = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
snake_case_ = self.prd_embedding.to(hidden_states.dtype ).expand(a__ , -1 , -1 )
additional_embeds.append(a__ )
snake_case_ = torch.cat(
a__ , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
snake_case_ = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
snake_case_ = F.pad(
a__ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
snake_case_ = hidden_states + positional_embeddings
if attention_mask is not None:
snake_case_ = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
snake_case_ = F.pad(a__ , (0, self.additional_embeddings) , value=0.0 )
snake_case_ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
snake_case_ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
snake_case_ = self.norm_in(a__ )
for block in self.transformer_blocks:
snake_case_ = block(a__ , attention_mask=a__ )
snake_case_ = self.norm_out(a__ )
if self.prd_embedding is not None:
snake_case_ = hidden_states[:, -1]
else:
snake_case_ = hidden_states[:, additional_embeddings_len:]
snake_case_ = self.proj_to_clip_embeddings(a__ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=a__ )
def lowerCAmelCase__ ( self , a__ ) -> str:
'''simple docstring'''
snake_case_ = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 85 |
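# Sketch of the additive causal mask registered in __init__ above: after triu_(1),
# strictly-upper entries stay at -1e4 (blocking attention to later positions) while
# everything on or below the diagonal becomes 0.
import torch

n = 4
causal = torch.full((n, n), -10000.0)
causal.triu_(1)
# causal[i, j] == -10000.0 exactly when j > i; adding it to attention scores
# drives every "future" position to ~0 probability after the softmax.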
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> Tuple:
'''simple docstring'''
return {key.lstrip('-' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def _snake_case( ) -> Dict:
'''simple docstring'''
A__ = ArgumentParser(
'HuggingFace Datasets CLI tool' , usage='datasets-cli <command> [<args>]' , allow_abbrev=SCREAMING_SNAKE_CASE__ )
A__ = parser.add_subparsers(help='datasets-cli command helpers' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
TestCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
RunBeamCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
DummyDataCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
# Parse args
A__ , A__ = parser.parse_known_args()
if not hasattr(SCREAMING_SNAKE_CASE__ , 'func' ):
parser.print_help()
exit(1 )
A__ = parse_unknown_args(SCREAMING_SNAKE_CASE__ )
# Run
A__ = args.func(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
service.run()
if __name__ == "__main__":
main()
| 7 | 0 |
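# Sketch of parse_unknown_args above: leftover "--key value" pairs become a kwargs
# dict (flag names below are illustrative, not real datasets-cli options).
unknown = ["--name", "my_dataset", "--all_configs", "true"]
kwargs = {k.lstrip("-"): v for k, v in zip(unknown[::2], unknown[1::2])}
assert kwargs == {"name": "my_dataset", "all_configs": "true"}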
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCAmelCase :Optional[Any] = logging.getLogger(__name__)
def _a ( _lowercase : Optional[int]=2 , _lowercase : List[Any]=3 , _lowercase : List[str]=16 , _lowercase : int = 10 , _lowercase : int = 2 ):
'''simple docstring'''
def get_dataset(_lowercase : Union[str, Any] ):
__UpperCAmelCase : int = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(_lowercase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
__UpperCAmelCase : Dict = get_dataset(_lowercase )
__UpperCAmelCase : List[Any] = get_dataset(_lowercase )
__UpperCAmelCase : Dict = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 )
__UpperCAmelCase : Any = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 )
return (train_dataloader, valid_dataloader)
def _a ( _lowercase : Union[str, Any] , _lowercase : int , _lowercase : Any , _lowercase : str , _lowercase : Tuple , _lowercase : List[Any]=None ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = []
for epoch in range(_lowercase ):
# Train quickly
model.train()
for batch in dataloader:
__UpperCAmelCase : int = batch
__UpperCAmelCase : Optional[Any] = model(_lowercase )
__UpperCAmelCase : Any = torch.nn.functional.mse_loss(_lowercase , _lowercase )
accelerator.backward(_lowercase )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class a ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] ) -> int:
super().__init__()
__UpperCAmelCase : int = nn.Parameter(torch.randn(1 ) )
__UpperCAmelCase : Union[str, Any] = nn.Parameter(torch.randn(1 ) )
def lowerCamelCase__ ( self : List[str] , snake_case : str ) -> Dict:
return x * self.a + self.b
class a ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self : Any ) -> str:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase : Optional[int] = DummyModel()
__UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCAmelCase : List[str] = dummy_dataloaders()
__UpperCAmelCase : Any = ProjectConfiguration(total_limit=1 , project_dir=snake_case , automatic_checkpoint_naming=snake_case )
# Train baseline
__UpperCAmelCase : str = Accelerator(project_config=snake_case )
__UpperCAmelCase : Optional[int] = accelerator.prepare(
snake_case , snake_case , snake_case , snake_case )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def lowerCamelCase__ ( self : List[str] ) -> str:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase : Any = DummyModel()
__UpperCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCAmelCase : Tuple = dummy_dataloaders()
# Train baseline
__UpperCAmelCase : Dict = Accelerator()
__UpperCAmelCase : int = accelerator.prepare(
snake_case , snake_case , snake_case , snake_case )
# Save initial
__UpperCAmelCase : Dict = os.path.join(snake_case , '''initial''' )
accelerator.save_state(snake_case )
(__UpperCAmelCase) : Optional[Any] = model.a.item(), model.b.item()
__UpperCAmelCase : Any = optimizer.state_dict()
__UpperCAmelCase : Dict = train(3 , snake_case , snake_case , snake_case , snake_case )
(__UpperCAmelCase) : str = model.a.item(), model.b.item()
__UpperCAmelCase : Dict = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCAmelCase : List[Any] = DummyModel()
__UpperCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCAmelCase : Optional[Any] = dummy_dataloaders()
__UpperCAmelCase : List[Any] = Accelerator()
__UpperCAmelCase : List[Any] = accelerator.prepare(
snake_case , snake_case , snake_case , snake_case )
accelerator.load_state(snake_case )
(__UpperCAmelCase) : Optional[int] = model.a.item(), model.b.item()
__UpperCAmelCase : Union[str, Any] = optimizer.state_dict()
self.assertEqual(snake_case , snake_case )
self.assertEqual(snake_case , snake_case )
self.assertEqual(snake_case , snake_case )
__UpperCAmelCase : str = train(2 , snake_case , snake_case , snake_case , snake_case )
# Save everything
__UpperCAmelCase : Any = os.path.join(snake_case , '''checkpoint''' )
accelerator.save_state(snake_case )
# Load everything back in and make sure all states work
accelerator.load_state(snake_case )
test_rands += train(1 , snake_case , snake_case , snake_case , snake_case )
(__UpperCAmelCase) : Optional[Any] = model.a.item(), model.b.item()
__UpperCAmelCase : Any = optimizer.state_dict()
self.assertEqual(snake_case , snake_case )
self.assertEqual(snake_case , snake_case )
self.assertEqual(snake_case , snake_case )
self.assertEqual(snake_case , snake_case )
def lowerCamelCase__ ( self : str ) -> str:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase : List[str] = DummyModel()
__UpperCAmelCase : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCAmelCase : Tuple = dummy_dataloaders()
__UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=snake_case )
# Train baseline
__UpperCAmelCase : Optional[Any] = Accelerator(project_dir=snake_case , project_config=snake_case )
__UpperCAmelCase : Tuple = accelerator.prepare(
snake_case , snake_case , snake_case , snake_case )
# Save initial
accelerator.save_state()
(__UpperCAmelCase) : Tuple = model.a.item(), model.b.item()
__UpperCAmelCase : List[str] = optimizer.state_dict()
__UpperCAmelCase : List[Any] = train(3 , snake_case , snake_case , snake_case , snake_case )
(__UpperCAmelCase) : Any = model.a.item(), model.b.item()
__UpperCAmelCase : Tuple = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCAmelCase : Optional[Any] = DummyModel()
__UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCAmelCase : str = dummy_dataloaders()
__UpperCAmelCase : Any = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=snake_case )
__UpperCAmelCase : List[Any] = Accelerator(project_dir=snake_case , project_config=snake_case )
__UpperCAmelCase : Union[str, Any] = accelerator.prepare(
snake_case , snake_case , snake_case , snake_case )
accelerator.load_state(os.path.join(snake_case , '''checkpoints''' , '''checkpoint_0''' ) )
(__UpperCAmelCase) : int = model.a.item(), model.b.item()
__UpperCAmelCase : Optional[Any] = optimizer.state_dict()
self.assertEqual(snake_case , snake_case )
self.assertEqual(snake_case , snake_case )
self.assertEqual(snake_case , snake_case )
__UpperCAmelCase : Any = train(2 , snake_case , snake_case , snake_case , snake_case )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(snake_case , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , snake_case , snake_case , snake_case , snake_case )
(__UpperCAmelCase) : str = model.a.item(), model.b.item()
__UpperCAmelCase : Any = optimizer.state_dict()
self.assertEqual(snake_case , snake_case )
self.assertEqual(snake_case , snake_case )
self.assertEqual(snake_case , snake_case )
self.assertEqual(snake_case , snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
__UpperCAmelCase : str = torch.tensor([1, 2, 3] )
__UpperCAmelCase : Union[str, Any] = torch.tensor([2, 3, 4] )
__UpperCAmelCase : List[Any] = DummyModel()
__UpperCAmelCase : List[str] = torch.optim.Adam(net.parameters() )
__UpperCAmelCase : Tuple = Accelerator()
with self.assertRaises(snake_case ) as ve:
accelerator.register_for_checkpointing(snake_case , snake_case , snake_case , snake_case )
__UpperCAmelCase : Tuple = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase : Union[str, Any] = DummyModel()
__UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCAmelCase : Any = torch.optim.lr_scheduler.StepLR(snake_case , step_size=1 , gamma=0.99 )
__UpperCAmelCase : List[Any] = dummy_dataloaders()
__UpperCAmelCase : Dict = ProjectConfiguration(automatic_checkpoint_naming=snake_case )
# Train baseline
__UpperCAmelCase : Any = Accelerator(project_dir=snake_case , project_config=snake_case )
__UpperCAmelCase : Any = accelerator.prepare(
snake_case , snake_case , snake_case , snake_case , snake_case )
# Save initial
accelerator.save_state()
__UpperCAmelCase : int = scheduler.state_dict()
train(3 , snake_case , snake_case , snake_case , snake_case , snake_case )
self.assertNotEqual(snake_case , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(snake_case , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(snake_case , scheduler.state_dict() )
def lowerCamelCase__ ( self : Optional[int] ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase : Tuple = DummyModel()
__UpperCAmelCase : Tuple = ProjectConfiguration(automatic_checkpoint_naming=snake_case , total_limit=2 )
# Train baseline
__UpperCAmelCase : Optional[Any] = Accelerator(project_dir=snake_case , project_config=snake_case )
__UpperCAmelCase : Optional[Any] = accelerator.prepare(snake_case )
            # Save 11 states (total_limit=2 keeps only the most recent checkpoints):
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(snake_case , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def lowerCamelCase__ ( self : str ) -> List[Any]:
__UpperCAmelCase : int = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(snake_case , env=os.environ.copy() )
if __name__ == "__main__":
__UpperCAmelCase :Optional[Any] = "/tmp/accelerate/state_checkpointing"
__UpperCAmelCase :Tuple = DummyModel()
__UpperCAmelCase :Dict = torch.optim.Adam(params=model.parameters(), lr=1e-3)
__UpperCAmelCase :Optional[Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
__UpperCAmelCase :Any = dummy_dataloaders()
__UpperCAmelCase :Optional[int] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__UpperCAmelCase :Any = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__UpperCAmelCase :Dict = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__UpperCAmelCase :List[str] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__UpperCAmelCase :Optional[int] = group["params"][0].device
break
assert param_device.type == accelerator.device.type
__UpperCAmelCase :Union[str, Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
__UpperCAmelCase :Optional[int] = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
__UpperCAmelCase :Tuple = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 357 |
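# Minimal round-trip sketch of the save_state/load_state machinery the tests cover,
# assuming accelerate and torch are installed; "ckpt_dir" is an arbitrary path.
import torch
from accelerate import Accelerator

model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
accelerator = Accelerator()
model, optimizer = accelerator.prepare(model, optimizer)
accelerator.save_state("ckpt_dir")   # writes model, optimizer and RNG states
accelerator.load_state("ckpt_dir")   # restores them for a bit-exact resume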
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( _a ):
"""simple docstring"""
def __init__( self : Optional[Any] , snake_case : Dict , snake_case : Dict=13 , snake_case : str=7 , snake_case : Dict=True , snake_case : Any=True , snake_case : Optional[Any]=True , snake_case : Optional[Any]=True , snake_case : List[str]=99 , snake_case : str=32 , snake_case : Any=5 , snake_case : List[str]=4 , snake_case : List[str]=37 , snake_case : int="gelu" , snake_case : int=0.1 , snake_case : int=0.1 , snake_case : Union[str, Any]=512 , snake_case : int=16 , snake_case : Optional[Any]=2 , snake_case : List[Any]=0.02 , snake_case : Any=False , snake_case : int=True , snake_case : Union[str, Any]="None" , snake_case : str=3 , snake_case : Union[str, Any]=4 , snake_case : Any=None , ) -> List[Any]:
__UpperCAmelCase : List[str] = parent
__UpperCAmelCase : Dict = batch_size
__UpperCAmelCase : Any = seq_length
__UpperCAmelCase : List[Any] = is_training
__UpperCAmelCase : List[str] = use_input_mask
__UpperCAmelCase : Union[str, Any] = use_token_type_ids
__UpperCAmelCase : Any = use_labels
__UpperCAmelCase : Any = vocab_size
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : List[str] = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : str = hidden_act
__UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
__UpperCAmelCase : int = attention_probs_dropout_prob
__UpperCAmelCase : Tuple = max_position_embeddings
__UpperCAmelCase : Any = type_vocab_size
__UpperCAmelCase : Tuple = type_sequence_label_size
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Dict = num_labels
__UpperCAmelCase : Any = num_choices
__UpperCAmelCase : Any = relative_attention
__UpperCAmelCase : Dict = position_biased_input
__UpperCAmelCase : Optional[int] = pos_att_type
__UpperCAmelCase : Dict = scope
def lowerCamelCase__ ( self : Dict ) -> Optional[int]:
__UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Optional[Any] = None
if self.use_input_mask:
__UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__UpperCAmelCase : Any = None
if self.use_token_type_ids:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : int = None
__UpperCAmelCase : str = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
__UpperCAmelCase : Optional[int] = self.get_config()
__UpperCAmelCase : Dict = 300
return config
def lowerCamelCase__ ( self : Any , snake_case : int ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def lowerCamelCase__ ( self : List[Any] , snake_case : Optional[int] , snake_case : Any , snake_case : List[Any] , snake_case : Any , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase : List[str] = DebertaModel(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )[0]
__UpperCAmelCase : Tuple = model(snake_case , token_type_ids=snake_case )[0]
__UpperCAmelCase : Optional[int] = model(snake_case )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def lowerCamelCase__ ( self : Optional[int] , snake_case : int , snake_case : Tuple , snake_case : Any , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Optional[int] , snake_case : List[str] ) -> Optional[int]:
__UpperCAmelCase : Union[str, Any] = DebertaForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : int = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : str , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Optional[Any] , snake_case : Tuple , snake_case : List[Any] , snake_case : str , snake_case : Tuple ) -> Union[str, Any]:
__UpperCAmelCase : Tuple = self.num_labels
__UpperCAmelCase : List[Any] = DebertaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Any = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(snake_case )
def lowerCamelCase__ ( self : str , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : List[str] , snake_case : Dict , snake_case : Optional[Any] , snake_case : Dict , snake_case : Optional[int] ) -> int:
__UpperCAmelCase : List[Any] = self.num_labels
__UpperCAmelCase : Union[str, Any] = DebertaForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Any = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self : str , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : str , snake_case : Optional[int] , snake_case : int , snake_case : int , snake_case : str ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = DebertaForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Optional[int] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : str ) -> int:
__UpperCAmelCase : int = self.prepare_config_and_inputs()
        (
            __UpperCAmelCase,
            __UpperCAmelCase,
            __UpperCAmelCase,
            __UpperCAmelCase,
            __UpperCAmelCase,
            __UpperCAmelCase,
            __UpperCAmelCase,
        ) = config_and_inputs
__UpperCAmelCase : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a ( _a , _a , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Dict = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : List[str] = False
SCREAMING_SNAKE_CASE : Any = False
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase : Dict = DebertaModelTester(self )
__UpperCAmelCase : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def lowerCamelCase__ ( self : Optional[int] ) -> Dict:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*snake_case )
def lowerCamelCase__ ( self : Dict ) -> List[str]:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case )
def lowerCamelCase__ ( self : Dict ) -> str:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*snake_case )
def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*snake_case )
@slow
def lowerCamelCase__ ( self : Dict ) -> Tuple:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : str = DebertaModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason='''Model not available yet''' )
def lowerCamelCase__ ( self : Dict ) -> Tuple:
pass
@slow
def lowerCamelCase__ ( self : int ) -> Optional[Any]:
__UpperCAmelCase : Any = DebertaModel.from_pretrained('''microsoft/deberta-base''' )
__UpperCAmelCase : Any = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
__UpperCAmelCase : Dict = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__UpperCAmelCase : int = model(snake_case , attention_mask=snake_case )[0]
# compare the actual values for a slice.
__UpperCAmelCase : Optional[Any] = torch.tensor(
[[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case , atol=1E-4 ) , f'{output[:, 1:4, 1:4]}' ) | 240 | 0 |
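# The integration test above relies on torch.allclose with an absolute tolerance; a
# tiny standalone illustration of that comparison pattern:
import torch

expected = torch.tensor([[-0.5986, -0.8055], [1.4484, -0.9348]])
actual = expected + 1e-5          # small numerical drift
assert torch.allclose(actual, expected, atol=1e-4)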
'''simple docstring'''
from __future__ import annotations
_UpperCamelCase = 10
def lowercase_ ( lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase : Any = 1
__UpperCAmelCase : Optional[int] = max(lowercase_ )
while placement <= max_digit:
# declare and initialize empty buckets
__UpperCAmelCase : Any = [[] for _ in range(lowercase_ )]
# split list_of_ints between the buckets
for i in list_of_ints:
__UpperCAmelCase : Tuple = int((i / placement) % RADIX )
buckets[tmp].append(lowercase_ )
# put each buckets' contents into list_of_ints
__UpperCAmelCase : Any = 0
for b in range(lowercase_ ):
for i in buckets[b]:
__UpperCAmelCase : Union[str, Any] = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 254 |
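# Runnable restatement of the LSD radix sort above (RADIX = 10), for non-negative ints:
def radix_sort(xs):
    placement = 1
    while xs and placement <= max(xs):
        buckets = [[] for _ in range(10)]          # one bucket per digit
        for x in xs:
            buckets[(x // placement) % 10].append(x)
        xs = [x for bucket in buckets for x in bucket]
        placement *= 10                            # move to the next digit
    return xs

assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]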
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict:
"""simple docstring"""
A__ = args.pruning_method
A__ = args.threshold
A__ = args.model_name_or_path.rstrip('''/''' )
A__ = args.target_model_path
print(f"""Load fine-pruned model from {model_name_or_path}""" )
A__ = torch.load(os.path.join(lowercase_ , '''pytorch_model.bin''' ) )
A__ = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
A__ = tensor
print(f"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
A__ = tensor
print(f"""Copied layer {name}""" )
elif "bias" in name:
A__ = tensor
print(f"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
A__ = MagnitudeBinarizer.apply(inputs=lowercase_ , threshold=lowercase_ )
A__ = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
A__ = name[:-6]
A__ = model[f"""{prefix_}mask_scores"""]
A__ = TopKBinarizer.apply(lowercase_ , lowercase_ )
A__ = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
A__ = name[:-6]
A__ = model[f"""{prefix_}mask_scores"""]
A__ = ThresholdBinarizer.apply(lowercase_ , lowercase_ , lowercase_ )
A__ = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
A__ = name[:-6]
A__ = model[f"""{prefix_}mask_scores"""]
A__ , A__ = -0.1, 1.1
A__ = torch.sigmoid(lowercase_ )
A__ = s * (r - l) + l
A__ = s_bar.clamp(min=0.0 , max=1.0 )
A__ = tensor * mask
print(f"""Pruned layer {name}""" )
else:
raise ValueError('''Unknown pruning method''' )
if target_model_path is None:
A__ = os.path.join(
os.path.dirname(lowercase_ ) , f"""bertarized_{os.path.basename(lowercase_ )}""" )
if not os.path.isdir(lowercase_ ):
shutil.copytree(lowercase_ , lowercase_ )
print(f"""\nCreated folder {target_model_path}""" )
torch.save(lowercase_ , os.path.join(lowercase_ , '''pytorch_model.bin''' ) )
print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
_lowerCamelCase : int = parser.parse_args()
main(args)
| 14 | 0 |
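# Simplified sketch of the magnitude branch above: zero out small weights and keep
# the rest. The real MagnitudeBinarizer keeps a top fraction of weights rather than
# using a fixed cutoff — this is an approximation for illustration only.
import torch

tensor = torch.randn(4, 4)
mask = (tensor.abs() > 0.5).float()  # magnitude binarization (hypothetical cutoff)
pruned = tensor * mask               # "tensor * mask", as in the script above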
import math
import os
import sys
def _UpperCamelCase ( snake_case__ ) -> str:
__UpperCAmelCase : Any = ""
try:
with open(snake_case__, "rb" ) as binary_file:
__UpperCAmelCase : str = binary_file.read()
for dat in data:
__UpperCAmelCase : List[Any] = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__ ) -> None:
lexicon.pop(snake_case__ )
__UpperCAmelCase : Dict = last_match_id
    if math.log2(snake_case__ ).is_integer():
for curr_key in lexicon:
__UpperCAmelCase : Any = "0" + lexicon[curr_key]
__UpperCAmelCase : List[Any] = bin(snake_case__ )[2:]
def _UpperCamelCase ( snake_case__ ) -> str:
__UpperCAmelCase : List[Any] = {"0": "0", "1": "1"}
__UpperCAmelCase , __UpperCAmelCase : Tuple = "", ""
__UpperCAmelCase : Optional[Any] = len(snake_case__ )
for i in range(len(snake_case__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__UpperCAmelCase : Optional[Any] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(snake_case__, snake_case__, snake_case__, snake_case__ )
index += 1
__UpperCAmelCase : Dict = ""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__UpperCAmelCase : Dict = lexicon[curr_string]
result += last_match_id
return result
def _UpperCamelCase ( snake_case__, snake_case__ ) -> str:
__UpperCAmelCase : Dict = os.path.getsize(snake_case__ )
__UpperCAmelCase : Tuple = bin(snake_case__ )[2:]
__UpperCAmelCase : List[Any] = len(snake_case__ )
return "0" * (length_length - 1) + file_length_binary + compressed
def _UpperCamelCase ( snake_case__, snake_case__ ) -> None:
__UpperCAmelCase : Tuple = 8
try:
with open(snake_case__, "wb" ) as opened_file:
__UpperCAmelCase : Dict = [
to_write[i : i + byte_length]
for i in range(0, len(snake_case__ ), snake_case__ )
]
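            # Self-delimiting pad: append a '1' then zero-fill so the final byte is full;
            # if the bitstream already ends on a byte boundary, a whole "10000000" pad
            # byte is appended instead, so a reader can always strip trailing zeros plus one '1'.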
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(snake_case__, 2 ).to_bytes(1, byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def _UpperCamelCase ( snake_case__, snake_case__ ) -> None:
__UpperCAmelCase : List[Any] = read_file_binary(snake_case__ )
__UpperCAmelCase : int = compress_data(snake_case__ )
__UpperCAmelCase : Dict = add_file_length(snake_case__, snake_case__ )
write_file_binary(snake_case__, snake_case__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
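    # Example invocation (file names hypothetical), assuming this module is saved as
    # lempel_ziv.py:  python lempel_ziv.py source.txt source.lzw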
| 342 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _snake_case ( _lowercase ):
def __init__( self: Optional[Any] , __lowerCamelCase: NestedDataStructureLike[PathLike] , __lowerCamelCase: Optional[NamedSplit] = None , __lowerCamelCase: Optional[Features] = None , __lowerCamelCase: str = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: Optional[int] = None , **__lowerCamelCase: Tuple , ) -> str:
super().__init__(
__lowerCamelCase , split=__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : Union[str, Any] = path_or_paths if isinstance(__lowerCamelCase , __lowerCamelCase ) else {self.split: path_or_paths}
__UpperCAmelCase : int = Text(
cache_dir=__lowerCamelCase , data_files=__lowerCamelCase , features=__lowerCamelCase , **__lowerCamelCase , )
def _lowerCamelCase ( self: List[Any] ) -> Optional[Any]:
# Build iterable dataset
if self.streaming:
__UpperCAmelCase : List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCAmelCase : Any = None
__UpperCAmelCase : Any = None
__UpperCAmelCase : Dict = None
__UpperCAmelCase : str = None
self.builder.download_and_prepare(
download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
__UpperCAmelCase : Dict = self.builder.as_dataset(
split=self.split , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
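    # Design note: with streaming=True the builder yields examples lazily via
    # as_streaming_dataset and skips download_and_prepare; otherwise the data is
    # materialized first (in memory when keep_in_memory=True) and returned as a
    # map-style Dataset.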
| 342 | 1 |
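# Size of the alphabet assumed by the rolling hash (extended ASCII byte values)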
SCREAMING_SNAKE_CASE :Any = 256
# Modulus to hash a string
SCREAMING_SNAKE_CASE :Union[str, Any] = 100_0003
def UpperCAmelCase ( a_ , a_ ) -> bool:
"""simple docstring"""
__A = len(a_ )
__A = len(a_ )
if p_len > t_len:
return False
__A = 0
__A = 0
__A = 1
# Calculating the hash of pattern and substring of text
for i in range(a_ ):
__A = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
__A = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
__A = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
        # Calculate the next window's rolling hash (see https://en.wikipedia.org/wiki/Rolling_hash)
__A = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
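# Rolling-hash recurrence applied above: with alphabet size a, window length p and
# modulus m, the next window hash is
#   h' = ((h - ord(text[i]) * a**(p - 1)) * a + ord(text[i + p])) mod m,
# where a**(p - 1) mod m is the precomputed modulus_power. Python's % keeps the
# intermediate difference non-negative, so no extra correction step is needed.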
def UpperCAmelCase ( ) -> None:
"""simple docstring"""
__A = "abc1abc12"
__A = "alskfjaldsabc1abc1abc12k23adsfabcabc"
__A = "alskfjaldsk23adsfabcabc"
assert rabin_karp(a_ , a_ ) and not rabin_karp(a_ , a_ )
# Test 2)
__A = "ABABX"
__A = "ABABZABABYABABX"
assert rabin_karp(a_ , a_ )
# Test 3)
__A = "AAAB"
__A = "ABAAAAAB"
assert rabin_karp(a_ , a_ )
# Test 4)
__A = "abcdabcy"
__A = "abcxabcdabxabcdabcdabcy"
assert rabin_karp(a_ , a_ )
# Test 5)
__A = "Lü"
__A = "Lüsai"
assert rabin_karp(a_ , a_ )
__A = "Lue"
assert not rabin_karp(a_ , a_ )
print("Success." )
if __name__ == "__main__":
test_rabin_karp()
| 15 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class a ( _lowerCamelCase ):
def A_ ( self : str ):
snake_case_ = tempfile.mkdtemp()
snake_case_ = 8
# DPR tok
snake_case_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
snake_case_ = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
snake_case_ = os.path.join(lowercase_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
snake_case_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
snake_case_ = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
snake_case_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
snake_case_ = {'''unk_token''': '''<unk>'''}
snake_case_ = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
snake_case_ = os.path.join(lowercase_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case_ = os.path.join(lowercase_ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowercase_ ) )
def A_ ( self : Union[str, Any] ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def A_ ( self : Union[str, Any] ):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def A_ ( self : int ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def A_ ( self : str ):
shutil.rmtree(self.tmpdirname )
def A_ ( self : str ):
snake_case_ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
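    # With METRIC_INNER_PRODUCT and retrieval_vector_size d, an all-ones query scores
    # 2 * d against doc "1" (embedding 2 * ones) and d against doc "0", so doc "1"
    # ranks first; the negated all-ones query reverses that ordering. The retrieval
    # assertions below depend on exactly this ranking.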
def A_ ( self : str ):
snake_case_ = self.get_dummy_dataset()
snake_case_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
snake_case_ = dataset
snake_case_ = RagRetriever(
lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def A_ ( self : str , lowercase_ : bool ):
snake_case_ = self.get_dummy_dataset()
snake_case_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
snake_case_ = os.path.join(self.tmpdirname , '''dataset''' )
snake_case_ = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
snake_case_ = RagRetriever(
lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
snake_case_ = RagRetriever(
lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , lowercase_ ) , )
return retriever
def A_ ( self : Tuple ):
snake_case_ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
snake_case_ = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
snake_case_ = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
snake_case_ = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(lowercase_ , open(lowercase_ , '''wb''' ) )
snake_case_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
snake_case_ = RagRetriever(
lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def A_ ( self : Optional[Any] ):
snake_case_ = 1
snake_case_ = self.get_dummy_canonical_hf_index_retriever()
snake_case_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowercase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowercase_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A_ ( self : str ):
snake_case_ = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
snake_case_ = self.get_dummy_dataset()
retriever.save_pretrained(lowercase_ )
snake_case_ = RagRetriever.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 )
self.assertTrue(out is not None )
def A_ ( self : int ):
snake_case_ = 1
snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
snake_case_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowercase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowercase_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A_ ( self : int ):
snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowercase_ )
snake_case_ = RagRetriever.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 )
self.assertTrue(out is not None )
def A_ ( self : str ):
snake_case_ = 1
snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
snake_case_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowercase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowercase_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A_ ( self : Any ):
snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowercase_ )
snake_case_ = RagRetriever.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 )
self.assertTrue(out is not None )
def A_ ( self : Any ):
snake_case_ = 1
snake_case_ = self.get_dummy_legacy_index_retriever()
snake_case_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(lowercase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , lowercase_ )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A_ ( self : int ):
snake_case_ = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(lowercase_ )
snake_case_ = RagRetriever.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
snake_case_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def A_ ( self : List[str] ):
import torch
snake_case_ = 1
snake_case_ = self.get_dummy_canonical_hf_index_retriever()
snake_case_ = [[5, 7], [10, 11]]
snake_case_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ = retriever(lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ )
snake_case_ ,snake_case_ ,snake_case_ = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertIsInstance(lowercase_ , np.ndarray )
snake_case_ = retriever(
lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ , return_tensors='''pt''' , )
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(lowercase_ , torch.Tensor )
self.assertIsInstance(lowercase_ , torch.Tensor )
self.assertIsInstance(lowercase_ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def A_ ( self : Tuple ):
snake_case_ = self.get_dpr_ctx_encoder_tokenizer()
snake_case_ = 1
snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
retriever.set_ctx_encoder_tokenizer(lowercase_ )
snake_case_ = [[5, 7], [10, 11]]
snake_case_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ = retriever(lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ )
self.assertEqual(
            len(lowercase_ ) , 6 )  # check that the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , lowercase_ )  # check for the doc-token-related keys in the output dictionary
| 56 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__a :Dict = logging.get_logger(__name__)
__a :List[Any] = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Dict = 'imagegpt'
_lowerCamelCase : Tuple = ['past_key_values']
_lowerCamelCase : Tuple = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : List[str] , UpperCAmelCase : List[Any]=512 + 1 , UpperCAmelCase : Optional[int]=32 * 32 , UpperCAmelCase : str=512 , UpperCAmelCase : Union[str, Any]=24 , UpperCAmelCase : str=8 , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : List[str]="quick_gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Tuple=1E-5 , UpperCAmelCase : str=0.02 , UpperCAmelCase : int=True , UpperCAmelCase : str=True , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : List[str]=False , UpperCAmelCase : Optional[int]=False , **UpperCAmelCase : int , ):
A_ = vocab_size
A_ = n_positions
A_ = n_embd
A_ = n_layer
A_ = n_head
A_ = n_inner
A_ = activation_function
A_ = resid_pdrop
A_ = embd_pdrop
A_ = attn_pdrop
A_ = layer_norm_epsilon
A_ = initializer_range
A_ = scale_attn_weights
A_ = use_cache
A_ = scale_attn_by_inverse_layer_idx
A_ = reorder_and_upcast_attn
A_ = tie_word_embeddings
super().__init__(tie_word_embeddings=UpperCAmelCase , **UpperCAmelCase )
class _a ( snake_case_ ):
"""simple docstring"""
@property
def __A ( self : Dict ):
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
] )
def __A ( self : List[str] , UpperCAmelCase : "FeatureExtractionMixin" , UpperCAmelCase : int = 1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional["TensorType"] = None , UpperCAmelCase : int = 3 , UpperCAmelCase : int = 32 , UpperCAmelCase : int = 32 , ):
A_ = self._generate_dummy_images(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
A_ = dict(preprocessor(images=UpperCAmelCase , return_tensors=UpperCAmelCase ) )
return inputs | 329 |
import math
__a :Union[str, Any] = 10
__a :Union[str, Any] = 7
__a :int = BALLS_PER_COLOUR * NUM_COLOURS
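# Reasoning sketch: of the C(70, 20) equally likely draws, those missing one fixed
# colour come entirely from the other 60 balls, C(60, 20) of them. By linearity of
# expectation, E[#colours seen] = NUM_COLOURS * (1 - C(60, 20) / C(70, 20)).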
def __snake_case ( __UpperCamelCase : int = 20 ):
"""simple docstring"""
A_ = math.comb(__UpperCamelCase ,__UpperCamelCase )
A_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR ,__UpperCamelCase )
A_ = NUM_COLOURS * (1 - missing_colour / total)
return f'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20)) | 329 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase__ :List[str] = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :int = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :int = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
lowercase__ :Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
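# Design note: _LazyModule defers the heavy torch/tokenizers imports until an
# attribute is first accessed, so importing the package stays cheap; the
# TYPE_CHECKING branch exposes the real symbols to static type checkers without
# paying the import cost at runtime.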
| 101 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowercase :
def __init__( self ,A__ ,A__=2 ,A__=3 ,A__=4 ,A__=2 ,A__=7 ,A__=True ,A__=True ,A__=True ,A__=True ,A__=9_9 ,A__=3_6 ,A__=3 ,A__=4 ,A__=3_7 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=5_1_2 ,A__=1_6 ,A__=2 ,A__=0.02 ,A__=6 ,A__=6 ,A__=3 ,A__=4 ,A__=None ,A__=1_0_0_0 ,):
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = image_size
lowercase = patch_size
lowercase = text_seq_length
lowercase = is_training
lowercase = use_input_mask
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = coordinate_size
lowercase = shape_size
lowercase = num_labels
lowercase = num_choices
lowercase = scope
lowercase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowercase = text_seq_length
lowercase = (image_size // patch_size) ** 2 + 1
lowercase = self.text_seq_length + self.image_seq_length
def A__ ( self):
lowercase = ids_tensor([self.batch_size, self.text_seq_length] ,self.vocab_size)
lowercase = ids_tensor([self.batch_size, self.text_seq_length, 4] ,self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowercase = bbox[i, j, 3]
lowercase = bbox[i, j, 1]
lowercase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowercase = bbox[i, j, 2]
lowercase = bbox[i, j, 0]
lowercase = t
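        # The two swap blocks above enforce the (x0, y0, x1, y1) box convention the
        # model expects: every randomly drawn box is reordered so x0 <= x1 and y0 <= y1.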
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowercase = None
if self.use_input_mask:
lowercase = random_attention_mask([self.batch_size, self.text_seq_length])
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.text_seq_length] ,self.type_vocab_size)
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
lowercase = ids_tensor([self.batch_size, self.text_seq_length] ,self.num_labels)
lowercase = LayoutLMvaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,coordinate_size=self.coordinate_size ,shape_size=self.shape_size ,input_size=self.image_size ,patch_size=self.patch_size ,)
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = LayoutLMvaModel(config=A__)
model.to(A__)
model.eval()
# text + image
lowercase = model(A__ ,pixel_values=A__)
lowercase = model(
A__ ,bbox=A__ ,pixel_values=A__ ,attention_mask=A__ ,token_type_ids=A__)
lowercase = model(A__ ,bbox=A__ ,pixel_values=A__ ,token_type_ids=A__)
lowercase = model(A__ ,bbox=A__ ,pixel_values=A__)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size))
# text only
lowercase = model(A__)
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.text_seq_length, self.hidden_size))
# image only
lowercase = model(pixel_values=A__)
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.image_seq_length, self.hidden_size))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = self.num_labels
lowercase = LayoutLMvaForSequenceClassification(A__)
model.to(A__)
model.eval()
lowercase = model(
A__ ,bbox=A__ ,pixel_values=A__ ,attention_mask=A__ ,token_type_ids=A__ ,labels=A__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = self.num_labels
lowercase = LayoutLMvaForTokenClassification(config=A__)
model.to(A__)
model.eval()
lowercase = model(
A__ ,bbox=A__ ,pixel_values=A__ ,attention_mask=A__ ,token_type_ids=A__ ,labels=A__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.text_seq_length, self.num_labels))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = LayoutLMvaForQuestionAnswering(config=A__)
model.to(A__)
model.eval()
lowercase = model(
A__ ,bbox=A__ ,pixel_values=A__ ,attention_mask=A__ ,token_type_ids=A__ ,start_positions=A__ ,end_positions=A__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length))
def A__ ( self):
lowercase = self.prepare_config_and_inputs()
        lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = config_and_inputs
lowercase = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : Dict =False
lowercase_ : Any =False
lowercase_ : Tuple =False
lowercase_ : Any =(
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ : int =(
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def A__ ( self):
lowercase = LayoutLMvaModelTester(self)
lowercase = ConfigTester(self ,config_class=A__ ,hidden_size=3_7)
def A__ ( self ,A__ ,A__ ,A__=False):
lowercase = copy.deepcopy(A__)
if model_class in get_values(A__):
lowercase = {
k: v.unsqueeze(1).expand(-1 ,self.model_tester.num_choices ,-1).contiguous()
if isinstance(A__ ,torch.Tensor) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(A__):
lowercase = torch.ones(self.model_tester.batch_size ,dtype=torch.long ,device=A__)
elif model_class in get_values(A__):
lowercase = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A__)
lowercase = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A__)
elif model_class in [
*get_values(A__),
]:
lowercase = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A__)
elif model_class in [
*get_values(A__),
]:
lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) ,dtype=torch.long ,device=A__ ,)
return inputs_dict
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__)
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase = type
self.model_tester.create_and_check_model(*A__)
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A__)
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A__)
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A__)
@slow
def A__ ( self):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = LayoutLMvaModel.from_pretrained(A__)
self.assertIsNotNone(A__)
def UpperCamelCase ( ):
'''simple docstring'''
lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class lowercase ( unittest.TestCase ):
@cached_property
def A__ ( self):
return LayoutLMvaImageProcessor(apply_ocr=A__) if is_vision_available() else None
@slow
def A__ ( self):
lowercase = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''').to(A__)
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=A__ ,return_tensors='''pt''').pixel_values.to(A__)
lowercase = torch.tensor([[1, 2]])
lowercase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
# forward pass
lowercase = model(
input_ids=input_ids.to(A__) ,bbox=bbox.to(A__) ,pixel_values=pixel_values.to(A__) ,)
# verify the logits
lowercase = torch.Size((1, 1_9_9, 7_6_8))
self.assertEqual(outputs.last_hidden_state.shape ,A__)
lowercase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(A__)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,A__ ,atol=1E-4))
| 101 | 1 |
"""simple docstring"""
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
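# This module is now a forwarding shim: the pipeline class lives in
# diffusers.pipelines.controlnet, and the deprecate() call emits a warning that
# points existing imports at the new location.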
| 357 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
__UpperCAmelCase = pd.read_csv(
'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
'position_salaries.csv'
)
__UpperCAmelCase = dataset.iloc[:, 1:2].values
__UpperCAmelCase = dataset.iloc[:, 2].values
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = train_test_split(X, y, test_size=0.2, random_state=0)
__UpperCAmelCase = PolynomialFeatures(degree=4)
__UpperCAmelCase = poly_reg.fit_transform(X)
__UpperCAmelCase = LinearRegression()
pol_reg.fit(X_poly, y)
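# PolynomialFeatures(degree=4) expands the scalar position level x into
# [1, x, x**2, x**3, x**4], so the "polynomial regression" is ordinary least
# squares fit on that expanded design matrix.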
def _snake_case ( ) -> str:
'''simple docstring'''
plt.scatter(lowercase__ , lowercase__ , color="""red""" )
plt.plot(lowercase__ , pol_reg.predict(poly_reg.fit_transform(lowercase__ ) ) , color="""blue""" )
plt.title("""Truth or Bluff (Linear Regression)""" )
plt.xlabel("""Position level""" )
plt.ylabel("""Salary""" )
plt.show()
if __name__ == "__main__":
    _snake_case()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 1 | 0 |
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowerCAmelCase__ :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_00 , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , ):
"""simple docstring"""
lowercase_ : Optional[int] = parent
lowercase_ : Tuple = 1_00
lowercase_ : Any = batch_size
lowercase_ : Union[str, Any] = image_size
lowercase_ : Dict = patch_size
lowercase_ : List[str] = num_channels
lowercase_ : str = is_training
lowercase_ : Any = use_labels
lowercase_ : Optional[int] = hidden_size
lowercase_ : List[Any] = num_hidden_layers
lowercase_ : Optional[int] = num_attention_heads
lowercase_ : Union[str, Any] = intermediate_size
lowercase_ : Any = hidden_act
lowercase_ : List[Any] = hidden_dropout_prob
lowercase_ : Tuple = attention_probs_dropout_prob
lowercase_ : Dict = type_sequence_label_size
lowercase_ : Optional[Any] = initializer_range
lowercase_ : str = scope
lowercase_ : str = out_indices
lowercase_ : List[str] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase_ : Tuple = (image_size // patch_size) ** 2
lowercase_ : int = num_patches + 1
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : List[str] = None
lowercase_ : int = None
if self.use_labels:
lowercase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase_ : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def _snake_case ( self ):
"""simple docstring"""
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[str] = BeitModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : Any = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : str = BeitForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : int = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[Any] = self.type_sequence_label_size
lowercase_ : List[Any] = BeitForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : List[str] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ : Optional[int] = 1
lowercase_ : Optional[Any] = BeitForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ : Tuple = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Tuple = self.num_labels
lowercase_ : List[str] = BeitForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : List[str] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
lowercase_ : int = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs
lowercase_ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
lowerCAmelCase_ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Any = BeitModelTester(self )
lowercase_ : Dict = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def _snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''' )
def _snake_case ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def _snake_case ( self ):
"""simple docstring"""
pass
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Any = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = model_class(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : int = [*signature.parameters.keys()]
lowercase_ : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__SCREAMING_SNAKE_CASE ), BeitForMaskedImageModeling]:
continue
lowercase_ : int = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
lowercase_ : str = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase_ : Any = False
lowercase_ : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__SCREAMING_SNAKE_CASE ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase_ : int = model_class(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.to(__SCREAMING_SNAKE_CASE )
model.train()
lowercase_ : Dict = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : str = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = model_class(config=__SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def _snake_case ( self ):
"""simple docstring"""
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Dict = BeitModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def snake_case_ ( ):
"""simple docstring"""
lowercase_ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def _snake_case ( self ):
"""simple docstring"""
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(__SCREAMING_SNAKE_CASE )
lowercase_ : int = self.default_image_processor
lowercase_ : Optional[int] = prepare_img()
lowercase_ : str = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values.to(__SCREAMING_SNAKE_CASE )
# prepare bool_masked_pos
lowercase_ : Union[str, Any] = torch.ones((1, 1_96) , dtype=torch.bool ).to(__SCREAMING_SNAKE_CASE )
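        # An all-True mask flags every one of the 196 patches as masked, so the head
        # predicts a visual token (one of the 8192 dVAE codebook entries) per patch.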
# forward pass
with torch.no_grad():
lowercase_ : str = model(pixel_values=__SCREAMING_SNAKE_CASE , bool_masked_pos=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = outputs.logits
# verify the logits
lowercase_ : Optional[int] = torch.Size((1, 1_96, 81_92) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-2 ) )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[Any] = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = self.default_image_processor
lowercase_ : Union[str, Any] = prepare_img()
lowercase_ : Any = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase_ : List[Any] = model(**__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = outputs.logits
# verify the logits
lowercase_ : Any = torch.Size((1, 10_00) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
lowercase_ : Any = 2_81
self.assertEqual(logits.argmax(-1 ).item() , __SCREAMING_SNAKE_CASE )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = self.default_image_processor
lowercase_ : Any = prepare_img()
lowercase_ : Any = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase_ : List[str] = model(**__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = outputs.logits
# verify the logits
lowercase_ : Union[str, Any] = torch.Size((1, 2_18_41) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
lowercase_ : str = 23_96
self.assertEqual(logits.argmax(-1 ).item() , __SCREAMING_SNAKE_CASE )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
lowercase_ : Any = model.to(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = BeitImageProcessor(do_resize=__SCREAMING_SNAKE_CASE , size=6_40 , do_center_crop=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowercase_ : Any = Image.open(ds[0]['''file'''] )
lowercase_ : Tuple = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase_ : Union[str, Any] = model(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = outputs.logits
# verify the logits
lowercase_ : Any = torch.Size((1, 1_50, 1_60, 1_60) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
lowercase_ : int = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
if is_pillow_less_than_a:
lowercase_ : Dict = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] , device=__SCREAMING_SNAKE_CASE , )
else:
lowercase_ : int = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] , device=__SCREAMING_SNAKE_CASE , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
lowercase_ : str = model.to(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = BeitImageProcessor(do_resize=__SCREAMING_SNAKE_CASE , size=6_40 , do_center_crop=__SCREAMING_SNAKE_CASE )
lowercase_ : int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowercase_ : Optional[int] = Image.open(ds[0]['''file'''] )
lowercase_ : Union[str, Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase_ : str = model(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = outputs.logits.detach().cpu()
lowercase_ : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE , target_sizes=[(5_00, 3_00)] )
lowercase_ : List[Any] = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = torch.Size((1_60, 1_60) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
| 93 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def UpperCamelCase_( _snake_case : Union[str, Any] ):
"""simple docstring"""
__a =[
'decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(_snake_case , _snake_case )
def UpperCamelCase_( _snake_case : Any ):
"""simple docstring"""
__a , __a =emb.weight.shape
__a =nn.Linear(_snake_case , _snake_case , bias=_snake_case )
__a =emb.weight.data
return lin_layer
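# Tying trick: the output projection reuses the embedding matrix as the weight of a
# bias-free nn.Linear, so logits are hidden_states @ embed_tokens.weight.T and no
# separate output matrix needs converting.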
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
__a =torch.load(_snake_case , map_location='cpu' )
__a =Namespace(**checkpoint['cfg']['model'] )
__a =checkpoint['model']
remove_ignore_keys_(_snake_case )
__a =state_dict['decoder.embed_tokens.weight'].shape[0]
__a ={key.replace('decoder' , 'model' ): val for key, val in state_dict.items()}
__a =XGLMConfig(
vocab_size=_snake_case , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='gelu' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
__a =XGLMForCausalLM(_snake_case )
__a =model.load_state_dict(_snake_case , strict=_snake_case )
print(_snake_case )
__a =make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
_lowerCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
_lowerCAmelCase : str = parser.parse_args()
_lowerCAmelCase : Union[str, Any] = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
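    # Hypothetical invocation (paths are placeholders):
    #   python convert_xglm_original_ckpt_to_trfms.py /path/to/model.pt ./xglm-hf-dump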
| 218 | 0 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
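
# The tester below builds a deliberately tiny RegNet configuration plus random pixel
# inputs so the shape-oriented checks run quickly on CPU; the sizes are arbitrary and
# carry no meaning beyond keeping the tests fast.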
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)

        # Output shape (b, c, h, w): the four stages reduce the spatial dims 32x
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            # one hidden state per stage, plus the initial embedding output
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
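
# Note: the JIT test above intentionally compares output *shapes* between jitted and
# eager execution rather than exact values; shape equality is the contract being tested.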
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 356 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
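
# The tester below fabricates a tiny GPT-NeoX-Japanese config plus random inputs;
# each create_and_check_* helper instantiates a model and asserts output shapes,
# and the KV-cache check verifies cached and uncached forward passes agree.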
class GPTNeoXJapaneseModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_multiple_size=4, hidden_act="gelu", hidden_dropout=0.0, attention_dropout=0.1, weight_tying=True, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_multiple_size=self.intermediate_multiple_size, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, weight_tying=self.weight_tying, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
@slow
    def test_generation(self):
        model_name = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_name)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_name)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            # greedy generation up to 50 tokens, decoded without special tokens
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS) | 34 | 0 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
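
# SparkDatasetReader bridges a pyspark DataFrame into the `datasets` loading API:
# it wraps the packaged Spark builder and exposes a single read() entry point that
# returns either a streaming dataset or a fully materialized one.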
class SparkDatasetReader(AbstractDatasetReader):
    """Read a pyspark.sql.DataFrame into a Hugging Face `datasets` Dataset."""

    def __init__(self, df: pyspark.sql.DataFrame, split: Optional[NamedSplit] = None, features: Optional[Features] = None, streaming: bool = True, cache_dir: str = None, keep_in_memory: bool = False, working_dir: str = None, load_from_cache_file: bool = True, file_format: str = "arrow", **kwargs):
        super().__init__(split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs)
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
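

# Hypothetical usage sketch (assumes an active SparkSession named `spark`):
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = SparkDatasetReader(df, cache_dir="/tmp/hf_cache", streaming=False).read()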
| 15 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(self, backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize this instance to a Python dict, nesting the backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
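

# Minimal usage sketch: with no backbone_config given, a default ResNet backbone
# config is created, so UperNetConfig().backbone_config.model_type == "resnet".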
| 261 | 0 |
"""simple docstring"""
def snake_case (A_ :Any , A_ :Union[str, Any] , A_ :Any , A_ :Union[str, Any] ):
'''simple docstring'''
a : str = [False] * len(__UpperCAmelCase )
a : Any = []
queue.append(__UpperCAmelCase )
a : str = True
while queue:
a : int = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__UpperCAmelCase )
a : int = True
a : Tuple = u
return visited[t]
def snake_case (A_ :str , A_ :Tuple , A_ :Union[str, Any] ):
'''simple docstring'''
a : str = [-1] * (len(__UpperCAmelCase ))
a : List[str] = 0
while bfs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
a : int = float('Inf' )
a : Tuple = sink
while s != source:
# Find the minimum value in select path
a : Optional[int] = min(__UpperCAmelCase , graph[parent[s]][s] )
a : Dict = parent[s]
max_flow += path_flow
a : List[Any] = sink
while v != source:
a : Any = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
a : int = parent[v]
return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
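
# Note: choosing augmenting paths with BFS makes this the Edmonds-Karp variant,
# which runs in O(V * E^2); the classic sample network above has a maximum flow of 23.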
| 352 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Check that ``_convert_token_to_id`` and ``_convert_id_to_token`` are inverses."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )

    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '',
'i',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['▁he', 'll', 'o'] )

    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
@slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # XLNet appends sep (id 4) and cls (id 3) at the end of a sequence / pair
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
@slow
    def test_tokenizer_integration(self):
        # fmt: off
a : Union[str, Any] = {'input_ids': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a, model_name="xlnet-base-cased", revision="c841166438c31ec7ca9a106dee7bb312b73ae511"
        )
| 186 | 0 |
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Tuple = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def __lowerCAmelCase (_UpperCamelCase = 100 ):
__lowerCAmelCase : Optional[int] = 1
__lowerCAmelCase : Optional[Any] = 2
for i in range(2 , max_n + 1 ):
__lowerCAmelCase : Any = pre_numerator
__lowerCAmelCase : Union[str, Any] = 2 * i // 3 if i % 3 == 0 else 1
__lowerCAmelCase : int = cur_numerator
__lowerCAmelCase : Dict = e_cont * pre_numerator + temp
return sum_digits(_UpperCamelCase )
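
# Worked check: the convergents of e begin 2, 3, 8/3, 11/4, 19/7, ... so
# solution(4) == sum_digits(11) == 2, and solution(10) == 17 (numerator 1457),
# matching the values stated in the problem.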
if __name__ == "__main__":
print(f'{solution() = }') | 86 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor


class KandinskyPipeline(DiffusionPipeline):
    def __init__(self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
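
    # movq_scale_factor is the total spatial downsampling of the VQ image codec,
    # derived from its number of block_out_channels; get_new_h_w above uses the same
    # factor so latent grids always decode back to the requested resolution.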
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        # scale the initial noise by the scheduler's expected standard deviation
        latents = latents * scheduler.init_noise_sigma
        return latents
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else 1
# get prompt text embeddings
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , truncation=_SCREAMING_SNAKE_CASE , max_length=77 , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Tuple = text_inputs.input_ids
__lowerCAmelCase : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
__lowerCAmelCase : Dict = text_input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = text_inputs.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = prompt_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Dict = text_encoder_hidden_states.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Optional[int] = text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : List[str]
if negative_prompt is None:
__lowerCAmelCase : Union[str, Any] = [''] * batch_size
elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !="
f" {type(_SCREAMING_SNAKE_CASE )}." )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = [negative_prompt]
elif batch_size != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
__lowerCAmelCase : Optional[int] = negative_prompt
__lowerCAmelCase : Tuple = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=77 , truncation=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Union[str, Any] = uncond_input.input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = uncond_input.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Any = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCAmelCase : List[str] = negative_prompt_embeds.shape[1]
__lowerCAmelCase : Any = negative_prompt_embeds.repeat(1 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = uncond_text_encoder_hidden_states.shape[1]
__lowerCAmelCase : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , _SCREAMING_SNAKE_CASE , 1 )
__lowerCAmelCase : Optional[int] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 )
__lowerCAmelCase : Optional[Any] = uncond_text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCAmelCase : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] )
__lowerCAmelCase : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
__lowerCAmelCase : int = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__lowerCAmelCase : Union[str, Any] = torch.device(f"cuda:{gpu_id}" )
__lowerCAmelCase : List[Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__lowerCAmelCase : str = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowerCAmelCase : Any = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
__lowerCAmelCase , __lowerCAmelCase : Any = cpu_offload_with_hook(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
if self.safety_checker is not None:
__lowerCAmelCase , __lowerCAmelCase : Dict = cpu_offload_with_hook(self.safety_checker , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
# We'll offload the last model manually.
__lowerCAmelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed. After calling
        `enable_sequential_cpu_offload` the execution device can only be inferred from
        Accelerate's module hooks.
        """
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
@replace_example_docstring(_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 1_00 , _SCREAMING_SNAKE_CASE = 4.0 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = 1
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = len(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(_SCREAMING_SNAKE_CASE )}" )
__lowerCAmelCase : Dict = self._execution_device
__lowerCAmelCase : Optional[Any] = batch_size * num_images_per_prompt
__lowerCAmelCase : Optional[int] = guidance_scale > 1.0
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = self._encode_prompt(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : Optional[Any] = image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : int = negative_image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=_SCREAMING_SNAKE_CASE )
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.scheduler.timesteps
__lowerCAmelCase : int = self.unet.config.in_channels
__lowerCAmelCase , __lowerCAmelCase : Any = get_new_h_w(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.movq_scale_factor )
# create initial latent
__lowerCAmelCase : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.scheduler , )
for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
__lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCAmelCase : Union[str, Any] = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
__lowerCAmelCase : Optional[Any] = self.unet(
sample=_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , added_cond_kwargs=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
if do_classifier_free_guidance:
__lowerCAmelCase , __lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = noise_pred.chunk(2 )
__lowerCAmelCase , __lowerCAmelCase : int = variance_pred.chunk(2 )
__lowerCAmelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowerCAmelCase : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCAmelCase : List[str] = self.scheduler.step(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , ).prev_sample
# post-processing
__lowerCAmelCase : Tuple = self.movq.decode(_SCREAMING_SNAKE_CASE , force_not_quantize=_SCREAMING_SNAKE_CASE )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
__lowerCAmelCase : List[str] = image * 0.5 + 0.5
__lowerCAmelCase : Dict = image.clamp(0 , 1 )
__lowerCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCAmelCase : Union[str, Any] = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE ) | 86 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 142 |
'''simple docstring'''
import pprint
import requests
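
# Thin wrappers around the public ZenQuotes REST API; each helper returns the decoded
# JSON payload (a list of quote objects). The name `quote_of_the_day` below is a
# reconstruction; the original function name was mangled in this copy.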
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    """Return today's quote from the ZenQuotes API as decoded JSON."""
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    """Return a random quote from the ZenQuotes API as decoded JSON."""
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 142 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
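
    # attribute_map lets generic Transformers code read `num_attention_heads` /
    # `num_hidden_layers` even though this config stores them as `num_heads` / `num_layers`.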
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 253 |
import os
import numpy
import onnx
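
# The helpers below find initializer tensors that are byte-identical apart from their
# names, rewrite every node input to reference one surviving copy (including inside
# If/Loop subgraphs), and drop the duplicates, shrinking the serialized model without
# changing its outputs.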
def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos for equality while ignoring their names."""
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    """Rewrite every input of `node_proto` called `name` to `new_name`, recursing into subgraphs."""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)

def remove_dup_initializers(onnx_file_path):
    """Dedupe identical initializer tensors in the model at `onnx_file_path` and save an optimized copy."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # float32
                    mem_size *= 4
                elif dtype == 6:  # int32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # int64 / float64
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]

                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)

    return new_model_path
| 253 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
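
# DisjunctiveConstraint forces generation to eventually produce one of several
# candidate token sequences; the tests below cover its input validation and the
# stepwise update()/reset() state machine via its (stepped, completed, reset) flags.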
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 370 | import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
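
# ConfigTester is a reusable harness: each model's test suite instantiates it with its
# config class and either calls the individual create_and_test_* helpers directly or
# runs them all through run_common_tests().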
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
__a = self.config_class(**self.inputs_dict )
__a = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
__a = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a = os.path.join(UpperCAmelCase , 'config.json' )
config_first.to_json_file(UpperCAmelCase )
__a = self.config_class.from_json_file(UpperCAmelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
__a = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(UpperCAmelCase )
__a = self.config_class.from_pretrained(UpperCAmelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
__a = self.config_class(**self.inputs_dict )
__a = 'test'
with tempfile.TemporaryDirectory() as tmpdirname:
__a = os.path.join(UpperCAmelCase , UpperCAmelCase )
config_first.save_pretrained(UpperCAmelCase )
__a = self.config_class.from_pretrained(UpperCAmelCase , subfolder=UpperCAmelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
__a = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
__a = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
if self.config_class.is_composition:
return
__a = self.config_class()
self.parent.assertIsNotNone(UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
__a = copy.deepcopy(UpperCAmelCase )
__a = self.config_class(**UpperCAmelCase )
__a = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(('torch_dtype', config.torch_dtype, torch.floataa) )
elif getattr(UpperCAmelCase , UpperCAmelCase ) != value:
wrong_values.append((key, getattr(UpperCAmelCase , UpperCAmelCase ), value) )
if len(UpperCAmelCase ) > 0:
__a = '\n'.join([f'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] )
raise ValueError(f'''The following keys were not properly set in the config:\n{errors}''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
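# Usage sketch (assumption: this mixin is the upstream `ConfigTester` from the
# transformers test suite, and `BertConfig` is only an illustrative choice):
#
#   class BertConfigTest(unittest.TestCase):
#       def setUp(self):
#           self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#
#       def test_config(self):
#           self.config_tester.run_common_tests()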
| 197 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    # (reconstructed, assumption) cap per-process XLA GPU memory as in the upstream test file
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 267 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a list of integers sorted in ascending order, return the indices of
    the two numbers that add up to ``target`` (or an empty list if none exist).

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    >>> two_pointer([2, 7, 11, 15], 26)
    [2, 3]
    """
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 306 | 0 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
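# Usage sketch: the defaults above correspond to the SegFormer-b0 backbone.
#
#   config = SegformerConfig(num_labels=150)  # e.g. 150 ADE20k classes
#   config.hidden_sizes          # [32, 64, 160, 256]
#   config.decoder_hidden_size   # 256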
| 371 |
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        # Histogram-equalisation style mapping: each new grey level is
        # s_k = (L - 1) * sum_{i <= k} p_r(i), rounded to the nearest integer.
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 328 | 0 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return it;
    otherwise fall back to ``_default_log_level``.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current level for the library's root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the level for the library's root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set the verbosity to the ``INFO`` level."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to the ``WARNING`` level."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to the ``DEBUG`` level."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to the ``ERROR`` level."""
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs (disabled by default)."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
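# Usage sketch (assumption: this module is `datasets.utils.logging`, so the
# import path below reflects that layout):
#
#   from datasets.utils import logging
#
#   logging.set_verbosity_info()
#   logger = logging.get_logger(__name__)
#   logger.info("visible at INFO level")
#
#   logging.disable_progress_bar()  # silences the `tqdm` wrapper defined above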
| 39 |
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exception raised during checksum verification of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected one."""


def verify_checksums(expected_checksums, recorded_checksums, verification_name: Optional[str] = None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exception raised during split verification."""


class UnexpectedSplits(SplitsVerificationException):
    """The downloaded file contains splits that were not expected."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The split sizes don't match the expected sizes."""


def verify_splits(expected_splits, recorded_splits):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path, record_checksum: bool = True) -> dict:
    """Compute the file size and, optionally, the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
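# Usage sketch (assumption: checksum dicts are shaped like the output of
# `get_size_checksum_dict`, keyed by URL):
#
#   expected = {"http://host/a.txt": {"num_bytes": 10, "checksum": "..."}}
#   recorded = {"http://host/a.txt": get_size_checksum_dict("a.txt")}
#   verify_checksums(expected, recorded)  # raises on mismatch or missing files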
| 39 | 1 |
"""simple docstring"""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."


def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
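# Example invocation (illustrative only: the script name, paths and model are
# placeholders, not values prescribed by the code above):
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file train.txt \
#       --train_ref_file train_ref.json \
#       --do_train \
#       --output_dir ./output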
| 11 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30_522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3_072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
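# Usage sketch: the derived properties follow directly from `block_sizes`.
#
#   config = FunnelConfig(block_sizes=[4, 4, 4])
#   config.num_hidden_layers  # 12, i.e. sum(block_sizes)
#   config.num_blocks         # 3, i.e. len(block_sizes)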
| 11 | 1 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class LoggingTester(unittest.TestCase):
    # Note: the original class name is not recoverable from this snippet; the
    # body mirrors transformers' logging test suite.

    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
 | 31 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the sklearn-style Bunch/dict into features and target
    """
    >>> data_handling(({"data": "[5.1, 3.5, 1.4, 0.2]", "target": [0]}))
    ('[5.1, 3.5, 1.4, 0.2]', [0])
    """
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    """
    Url for the algorithm: https://xgboost.readthedocs.io/en/stable/
    The Iris dataset is used to demonstrate the algorithm.
    """
    # Load Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 31 | 1 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql,
        con,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con,
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_batch in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_batch
        return written
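# Usage sketch (assumption: a SQLAlchemy-style URI for a local SQLite file;
# these classes back the public Dataset.from_sql / Dataset.to_sql helpers):
#
#   ds = SqlDatasetReader("SELECT * FROM my_table", "sqlite:///my.db").read()
#   SqlDatasetWriter(ds, "my_table_copy", "sqlite:///my.db", batch_size=1000).write()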
| 185 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """
    Solve a strictly diagonally dominant linear system Ax = b iteratively:
    each sweep computes x_i = (b_i - sum_{j != i} a_ij * x_j) / a_ii.
    """
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
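    # Worked example (added for illustration): a strictly diagonally dominant
    # system whose exact solution is [1, -1, 0].
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[3.0], [-4.0], [-1.0]])
    print(jacobi_iteration_method(coefficient, constant, [0, 0, 0], 50))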
| 185 | 1 |
"""simple docstring"""
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """
    Find the minimum cost path from the top-left to the bottom-right of the
    grid, moving only right or down. Note: this mutates ``matrix`` in place.

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
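    # Example run (added for illustration): the classic 3x3 grid from the
    # doctest above has minimum path cost 1 -> 3 -> 1 -> 1 -> 1 = 7.
    print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7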
| 45 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forwards all arguments to the tokenizer's batch_decode
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwards all arguments to the tokenizer's decode
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
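# Minimal usage sketch (assumption: the BAAI/AltCLIP checkpoint, whose repo
# ships both the XLM-R tokenizer and the CLIP image processor):
#
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # -> input_ids / attention_mask from the tokenizer plus pixel_values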
| 57 | 0 |
"""simple docstring"""
def nand_gate(input_1: int, input_2: int) -> int:
    # NAND is 0 only when both inputs are 1: nand(a, b) = not (a and b)
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 56 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # The three overrides below intentionally skip common tokenizer tests that
    # do not apply to RoFormer; their exact upstream names are an assumption.
    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
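# Quick usage sketch, grounded in the expectations asserted above:
#
#   tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
#   tokenizer.tokenize("永和服装饰品有限公司,今天天气非常好")
#   # ['永和', '服装', '饰品', '有限公司', ',', '今', '天', '天', '气', '非常', '好']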
| 56 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 1_26
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key( name , config ):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" )
    if "layers" in name:
        name = name.replace("""layers""" , """encoder.stages""" )
    if "residual_group.blocks" in name:
        name = name.replace("""residual_group.blocks""" , """layers""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name:
        name = name.replace("""attn""" , """attention.self""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "q_bias" in name:
        name = name.replace("""q_bias""" , """query.bias""" )
    if "k_bias" in name:
        name = name.replace("""k_bias""" , """key.bias""" )
    if "v_bias" in name:
        name = name.replace("""v_bias""" , """value.bias""" )
    if "cpb_mlp" in name:
        name = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""" , """patch_embed.projection""" )
    if name == "norm.weight":
        name = """layernorm.weight"""
    if name == "norm.bias":
        name = """layernorm.bias"""
    if "conv_first" in name:
        name = name.replace("""conv_first""" , """first_convolution""" )
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("""conv_last""" , """final_convolution""" )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" )
            if "upsample.0" in name:
                name = name.replace("""upsample.0""" , """upsample.convolution_0""" )
            if "upsample.2" in name:
                name = name.replace("""upsample.2""" , """upsample.convolution_1""" )
            name = """upsample.""" + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" )
            name = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" )
        else:
            pass
    else:
        name = """swin2sr.""" + name
    return name
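# Editorial illustration (not part of the original script): rename_key maps e.g.
# "layers.0.residual_group.blocks.1.attn.proj.weight"
# -> "swin2sr.encoder.stages.0.layers.1.attention.output.dense.weight"
# for any key that does not belong to one of the upsampling heads.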
def convert_state_dict( orig_state_dict , config ):
    # note: the fused-qkv target key names below are reconstructed from the
    # rename conventions above and may differ slightly from the original script
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split(""".""" )
            stage_num = int(key_split[1] )
            block_num = int(key_split[4] )
            dim = config.embed_dim
            if "weight" in key:
                orig_state_dict[
                    f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight'
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias'
                ] = val[:dim]
                orig_state_dict[
                    f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias'
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias'
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , config )] = val
    return orig_state_dict
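# A minimal, hypothetical sketch (not in the original script) of the qkv split
# performed above: a fused (3*dim, dim) projection is sliced into equal
# query/key/value thirds along the first axis. `dim` is illustrative only.
def _demo_qkv_split(dim: int = 4) -> None:
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    query, key, value = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert query.shape == key.shape == value.shape == (dim, dim)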
def convert_swinasr_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub ):
    config = get_config(checkpoint_url )
    model = Swin2SRForImageSuperResolution(config )
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    if len(missing_keys ) > 0:
        raise ValueError("""Missing keys when converting: {}""".format(missing_keys ) )
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f'Unexpected key {key} in state_dict' )
    # verify values
    url = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 1_26 if """Jpeg""" in checkpoint_url else 2_56
    transforms = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    pixel_values = transforms(image ).unsqueeze(0 )
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )
    outputs = model(pixel_values )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
A_ : str = torch.Size([1, 3, 5_12, 5_12] )
A_ : Optional[Any] = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
A_ : str = torch.Size([1, 3, 10_24, 10_24] )
A_ : Any = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
A_ : List[Any] = torch.Size([1, 3, 10_24, 10_24] )
A_ : Dict = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
A_ : Union[str, Any] = torch.Size([1, 3, 5_12, 5_12] )
A_ : int = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
A_ : Any = torch.Size([1, 3, 10_24, 10_24] )
A_ : List[str] = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , expected_slice , atol=1E-3 )
print("""Looks ok!""" )
    url_to_name = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'Saving image processor to {pytorch_dump_folder_path}' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub(f'caidas/{model_name}' )
        processor.push_to_hub(f'caidas/{model_name}' )
if __name__ == "__main__":
lowerCamelCase :Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
lowerCamelCase :int = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 206 |
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('''cuda''')
prompt = '''A photo of sks dog in a bucket'''
image = pipe(prompt, num_inference_steps=5_0, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''') | 206 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , num_frames=1_0 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , crop_size=None , ) -> None:
        size = size if size is not None else {'shortest_edge': 1_8}
        crop_size = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict( self ) -> dict:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp( self ) -> None:
        self.image_processor_tester = VivitImageProcessingTester(self )

    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> None:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'do_center_crop' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> None:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 1_8} )
        self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
        self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
        self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
    def test_call_pil( self ) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False )
        for video in video_inputs:
            self.assertIsInstance(video , list )
            self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
        encoded_videos = image_processing(video_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_numpy( self ) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for video in video_inputs:
            self.assertIsInstance(video , list )
            self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
        encoded_videos = image_processing(video_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_pytorch( self ) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for video in video_inputs:
            self.assertIsInstance(video , list )
            self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
        encoded_videos = image_processing(video_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
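    # Editorial note: all three input flavours above (PIL, numpy, torch) are
    # expected to collapse to the same 5-D pixel_values layout
    # (batch, frames, channels, height, width) after preprocessing.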
| 355 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def a_ ( lowerCamelCase : str = "AAPL" ):
lowerCAmelCase = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
lowerCAmelCase = BeautifulSoup(requests.get(lowerCamelCase ).text , 'html.parser' )
lowerCAmelCase = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_ ).find('span' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 55 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
lowercase : Any = """
Human: <<task>>
Assistant: """
lowercase : List[str] = """huggingface-tools/default-prompts"""
lowercase : str = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
def A_ ( A__ , A__ , A__="run" ) -> Dict:
if prompt_or_repo_id is None:
a__ : Any = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s' , A__ ) is not None:
return prompt_or_repo_id
a__ : List[str] = cached_file(
A__ , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} )
with open(A__ , 'r' , encoding='utf-8' ) as f:
return f.read()
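# Hedged usage sketch (editorial addition): download_prompt(None, "my-agent")
# falls back to the default prompts dataset repo, while a string containing any
# whitespace, e.g. download_prompt("Answer: <<task>>", "my-agent"), is treated
# as a literal prompt and returned verbatim.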
| 99 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1( self ) -> None:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_euler' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_2( self ) -> None:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_euler' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
    def test_stable_diffusion_karras_sigmas( self ) -> None:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_dpmpp_2m' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , num_inference_steps=1_5 , output_type='np' , use_karras_sigmas=True , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array(
            [0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
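    # Editorial note: the names passed to set_scheduler above ("sample_euler",
    # "sample_dpmpp_2m") are k-diffusion sampler identifiers rather than
    # diffusers scheduler classes; use_karras_sigmas=True additionally swaps in
    # the Karras et al. noise schedule for the chosen sampler.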
| 4 | 0 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ = TypeVar("""KEY""")
SCREAMING_SNAKE_CASE__ = TypeVar("""VAL""")
@dataclass(frozen=snake_case_ , slots=snake_case_ )
class __lowerCamelCase ( Generic[KEY, VAL] ):
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
class __lowerCamelCase ( _Item ):
"""simple docstring"""
def __init__( self ) -> None:
'''simple docstring'''
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __bool__( self ) -> bool:
'''simple docstring'''
return False
SCREAMING_SNAKE_CASE__ = _DeletedItem()
class HashMap( MutableMapping[KEY, VAL] ):
    def __init__( self , initial_block_size=8 , capacity_factor=0.75 ) -> None:
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index( self , key ) -> int:
        return hash(key ) % len(self._buckets )

    def _get_next_ind( self , ind ) -> int:
        return (ind + 1) % len(self._buckets )

    def _try_set( self , ind , key , val ) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False

    def _is_full( self ) -> bool:
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )

    def _is_sparse( self ) -> bool:
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit

    def _resize( self , new_size ) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )

    def _size_up( self ) -> None:
        self._resize(len(self._buckets ) * 2 )

    def _size_down( self ) -> None:
        self._resize(len(self._buckets ) // 2 )

    def _iterate_buckets( self , key ) -> Iterator[int]:
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )

    def _add_item( self , key , val ) -> None:
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break

    def __setitem__( self , key , val ) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key , val )

    def __delitem__( self , key ) -> None:
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__( self , key ) -> VAL:
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )

    def __len__( self ) -> int:
        return self._len

    def __iter__( self ) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__( self ) -> str:
        val_string = " ,".join(
            F'{item.key}: {item.val}' for item in self._buckets if item )
        return F'HashMap({val_string})'
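# Hedged usage sketch (editorial addition, not part of the original module):
# exercises insert, lookup, overwrite, delete and automatic resizing.
def _demo_hash_map() -> None:
    table = HashMap()
    for i in range(20):  # enough inserts to trigger at least one _size_up()
        table[str(i)] = i
    table["7"] = 70  # overwriting an existing key keeps the length unchanged
    assert table["7"] == 70 and len(table) == 20
    del table["7"]
    assert "7" not in table and len(table) == 19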
| 369 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , hidden_sizes=[32, 64, 128] , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2"] , out_indices=[1, 2] , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = FocalNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
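        # Editorial note: with the defaults above (image_size=32, patch_size=2,
        # len(depths)=3, embed_dim=16) this checks a final sequence of
        # (32 // 2) ** 2 // 4 ** 2 = 16 tokens of width 16 * 2 ** 2 = 64 -
        # each downsampling stage quarters the token count and doubles the width.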
    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        model = FocalNetForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ) -> None:
        self.model_tester = FocalNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )
    def test_config( self ) -> None:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
'''simple docstring'''
return
    def test_model( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_for_masked_image_modeling( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
    def test_inputs_embeds( self ) -> None:
'''simple docstring'''
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
    def test_feed_forward_chunking( self ) -> None:
'''simple docstring'''
pass
    def test_model_common_attributes( self ) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states ) , expected_num_layers )
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output( self ) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def test_hidden_states_output_with_padding( self ) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@slow
    def test_model_from_pretrained( self ) -> None:
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_initialization( self ) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ) -> None:
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(torch_device )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class FocalNetBackboneTest( BackboneTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False
    def setUp( self ) -> None:
        self.model_tester = FocalNetModelTester(self )
| 297 | 0 |
from math import isclose, sqrt
def next_point( point_x: float , point_y: float , incoming_gradient: float ) -> tuple[float, float, float]:
    # gradient of the normal to the ellipse 4x^2 + y^2 = 100 at (point_x, point_y)
    normal_gradient = point_y / 4 / point_x
    # s2 and c2 are sin(2*theta) and cos(2*theta), where theta is the angle of the normal
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 1_0_0
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus , point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
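# Editorial derivation note: substituting y = b + m*(x - a) with
# m = outgoing_gradient, a = point_x, b = point_y into y^2 + 4x^2 = 100 gives
#   (m^2 + 4) x^2 + 2m(b - m a) x + (b - m a)^2 - 100 = 0,
# which matches quadratic_term, linear_term and constant_term above.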
def solution( first_x_coord: float = 1.4 , first_y_coord: float = -9.6 ) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x , point_y , gradient )
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 73 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_transfo_xl"""] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_transfo_xl"""] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
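# Editorial note: this is the standard transformers lazy-import layout - at
# type-checking time the names above resolve to the real classes, while at
# runtime the module object is swapped for a _LazyModule that imports each
# submodule only on first attribute access, keeping `import transformers` cheap.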
| 116 | 0 |
"""simple docstring"""
snake_case_ = """Tobias Carryer"""
from time import time
class LinearCongruentialGenerator:
    def __init__( self , multiplier , increment , modulo , seed=int(time() ) ) -> None:  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number( self ) -> int:
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
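# Editorial note (assumption about provenance): multiplier 1664525, increment
# 1013904223 and modulo 2**32 below are the classic "Numerical Recipes" LCG
# constants; the recurrence seed = (a * seed + c) % m attains the full period m
# when the Hull-Dobell conditions are satisfied, as they are here.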
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
| 363 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
snake_case_ = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
snake_case_ = logging.get_logger(__name__)
class MaskFormerConfig( PretrainedConfig ):
"""simple docstring"""
__UpperCamelCase = """maskformer"""
__UpperCamelCase = {"""hidden_size""": """mask_feature_size"""}
__UpperCamelCase = ["""resnet""", """swin"""]
__UpperCamelCase = ["""detr"""]
    def __init__( self , fpn_feature_size: int = 2_56 , mask_feature_size: int = 2_56 , no_object_weight: float = 0.1 , use_auxiliary_loss: bool = False , backbone_config: Optional[Dict] = None , decoder_config: Optional[Dict] = None , init_std: float = 0.02 , init_xavier_std: float = 1.0 , dice_weight: float = 1.0 , cross_entropy_weight: float = 1.0 , mask_weight: float = 20.0 , output_auxiliary_logits: Optional[bool] = None , **kwargs , ) -> None:
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop('model_type' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
                f"""Supported model types: {','.join(self.backbones_supported )}""" )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop('model_type' ) if isinstance(decoder_config , dict ) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"""Transformer Decoder {decoder_type} not supported, please use one of"""
                    f""" {','.join(self.decoders_supported )}""" )
            if isinstance(decoder_config , dict ):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config )
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs )
    @classmethod
    def from_backbone_and_decoder_configs( cls , backbone_config: PretrainedConfig , decoder_config: PretrainedConfig , **kwargs ):
        return cls(
            backbone_config=backbone_config , decoder_config=decoder_config , **kwargs , )
    def to_dict( self ) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__ )
        output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""decoder_config"""] = self.decoder_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
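# Hedged usage sketch (editorial addition; assumes the public transformers API):
# from transformers import MaskFormerConfig, SwinConfig, DetrConfig
# config = MaskFormerConfig.from_backbone_and_decoder_configs(SwinConfig(), DetrConfig())
# config.to_dict()["model_type"]  # -> "maskformer"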
| 181 | 0 |
import numpy
# List of input, output pairs
a_ = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
a_ = (((515, 22, 13), 555), ((61, 35, 49), 150))
a_ = [2, 4, 1, 5]
a_ = len(train_data)
a_ = 0.009
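# Editorial note: each training example is ((x1, x2, x3), y) and the hypothesis
# is h(x) = p0 + p1*x1 + p2*x2 + p3*x3 with parameter_vector = [p0, p1, p2, p3];
# batch gradient descent below nudges every parameter against the error summed
# over all m training examples.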
def _a ( UpperCamelCase_ : Dict , UpperCamelCase_ : str="train" ) -> Optional[int]:
"""simple docstring"""
return calculate_hypothesis_value(UpperCamelCase_ , UpperCamelCase_ ) - output(
UpperCamelCase_ , UpperCamelCase_ )
def _hypothesis_value( data_input_tuple ):
    """simple docstring"""
    hyp_val = 0
    for i in range(len(data_input_tuple ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output( example_no , data_set ):
    """simple docstring"""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value( example_no , data_set ):
    """simple docstring"""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None
def summation_of_cost_derivative( index , end=m ):
    """simple docstring"""
    summation_value = 0
    for i in range(end ):
        if index == -1:
            summation_value += _error(i )
        else:
            summation_value += _error(i ) * train_data[i][0][index]
    return summation_value
def get_cost_derivative( index ):
    """simple docstring"""
    cost_derivative_value = summation_of_cost_derivative(index , m ) / m
    return cost_derivative_value
def run_gradient_descent():
    """simple docstring"""
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000_002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0 , len(parameter_vector ) ):
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector , temp_parameter_vector , atol=absolute_error_limit , rtol=relative_error_limit , ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j) )
def test_gradient_descent():
    """simple docstring"""
    for i in range(len(test_data ) ):
        print(("Actual output value:", output(i , "test" )) )
        print(("Hypothesis output:", calculate_hypothesis_value(i , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent()
| 340 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotImageClassificationPipeline( Pipeline ):
def __init__( self , **__UpperCAmelCase )-> List[str]:
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self , images , **kwargs ):
        '''simple docstring'''
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." ):
        '''simple docstring'''
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        candidate_labels = model_inputs.pop("candidate_labels" )
        text_inputs = model_inputs.pop("text_inputs" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs ):
        '''simple docstring'''
        candidate_labels = model_outputs.pop("candidate_labels" )
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F"Unsupported framework: {self.framework}" )
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
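# Hedged usage sketch (editorial addition; the checkpoint id is an assumption,
# any CLIP-style model should work):
# from transformers import pipeline
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# classifier("cats.png", candidate_labels=["two cats", "a dog"])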
| 340 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate( model_type , generator_name_or_path: str , question_encoder_name_or_path: str , dest_dir: Path , config_name_or_path: str = None , generator_tokenizer_name_or_path: str = None , question_encoder_tokenizer_name_or_path: str = None , ):
    """simple docstring"""
    if config_name_or_path is None:
        config_name_or_path = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path , generator_name_or_path , config=rag_config )
    rag_model.save_pretrained(dest_dir )
    # Sanity check.
    model_class.from_pretrained(dest_dir )
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/' )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/' )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
lowerCamelCase = parser.parse_args()
lowerCamelCase = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
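# Hedged example invocation (editorial addition; the script file name is hypothetical):
# python consolidate_rag_checkpoint.py \
#   --model_type rag_sequence \
#   --generator_name_or_path facebook/bart-large-cnn \
#   --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#   --dest ./rag-sequence-ckpt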
| 48 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig( PretrainedConfig ):
'''simple docstring'''
lowerCAmelCase__ = """wavlm"""
def __init__( self : List[str] , _lowerCAmelCase : List[Any]=3_2 , _lowerCAmelCase : int=7_6_8 , _lowerCAmelCase : Any=1_2 , _lowerCAmelCase : Union[str, Any]=1_2 , _lowerCAmelCase : List[Any]=3_0_7_2 , _lowerCAmelCase : Dict="gelu" , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Optional[Any]=0.1 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : List[Any]=0.02 , _lowerCAmelCase : Dict=1e-5 , _lowerCAmelCase : List[Any]="group" , _lowerCAmelCase : Optional[Any]="gelu" , _lowerCAmelCase : Dict=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _lowerCAmelCase : Any=(5, 2, 2, 2, 2, 2, 2) , _lowerCAmelCase : Optional[Any]=(1_0, 3, 3, 3, 3, 2, 2) , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : int=1_2_8 , _lowerCAmelCase : Tuple=1_6 , _lowerCAmelCase : Optional[int]=3_2_0 , _lowerCAmelCase : Union[str, Any]=8_0_0 , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Any=0.05 , _lowerCAmelCase : List[Any]=1_0 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : Union[str, Any]=1_0 , _lowerCAmelCase : List[Any]=3_2_0 , _lowerCAmelCase : int=2 , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : Optional[int]=1_0_0 , _lowerCAmelCase : Tuple=2_5_6 , _lowerCAmelCase : Union[str, Any]=2_5_6 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Tuple="mean" , _lowerCAmelCase : Any=False , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : Any=2_5_6 , _lowerCAmelCase : Tuple=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , _lowerCAmelCase : Dict=(5, 3, 3, 1, 1) , _lowerCAmelCase : Dict=(1, 2, 3, 1, 1) , _lowerCAmelCase : int=5_1_2 , _lowerCAmelCase : Optional[int]=8_0 , _lowerCAmelCase : Any=0 , _lowerCAmelCase : int=1 , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Any=3 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : List[Any]=3 , _lowerCAmelCase : List[str]=None , **_lowerCAmelCase : List[str] , ):
'''simple docstring'''
super().__init__(**_lowerCAmelCase , pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase)
__lowercase =hidden_size
__lowercase =feat_extract_norm
__lowercase =feat_extract_activation
__lowercase =list(_lowerCAmelCase)
__lowercase =list(_lowerCAmelCase)
__lowercase =list(_lowerCAmelCase)
__lowercase =conv_bias
__lowercase =num_buckets
__lowercase =max_bucket_distance
__lowercase =num_conv_pos_embeddings
__lowercase =num_conv_pos_embedding_groups
__lowercase =len(self.conv_dim)
__lowercase =num_hidden_layers
__lowercase =intermediate_size
__lowercase =hidden_act
__lowercase =num_attention_heads
__lowercase =hidden_dropout
__lowercase =attention_dropout
__lowercase =activation_dropout
__lowercase =feat_proj_dropout
__lowercase =final_dropout
__lowercase =layerdrop
__lowercase =layer_norm_eps
__lowercase =initializer_range
__lowercase =num_ctc_classes
__lowercase =vocab_size
__lowercase =do_stable_layer_norm
__lowercase =use_weighted_layer_sum
__lowercase =classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase =apply_spec_augment
__lowercase =mask_time_prob
__lowercase =mask_time_length
__lowercase =mask_time_min_masks
__lowercase =mask_feature_prob
__lowercase =mask_feature_length
# parameters for pretraining with codevector quantized representations
__lowercase =num_codevectors_per_group
__lowercase =num_codevector_groups
__lowercase =contrastive_logits_temperature
__lowercase =num_negatives
__lowercase =codevector_dim
__lowercase =proj_codevector_dim
__lowercase =diversity_loss_weight
# ctc loss
__lowercase =ctc_loss_reduction
__lowercase =ctc_zero_infinity
# adapter
__lowercase =add_adapter
__lowercase =adapter_kernel_size
__lowercase =adapter_stride
__lowercase =num_adapter_layers
__lowercase =output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowercase =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowercase =list(_lowerCAmelCase)
__lowercase =list(_lowerCAmelCase)
__lowercase =list(_lowerCAmelCase)
__lowercase =xvector_output_dim
@property
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
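# Worked example of the ratio property above: with the default strides
# (5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples by 5 * 2**6 = 320,
# i.e. one output frame per 320 input samples (20 ms of 16 kHz audio).
def _ratio_example():
    config = WavLMConfig()
    assert config.inputs_to_logits_ratio == 320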
| 48 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset, using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
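# Minimal sketch of the tracking calls exercised by the script above, isolated
# for reference; the run name and the logged values are illustrative
# assumptions, not outputs of this script.
def tracking_sketch():
    accelerator = Accelerator(log_with="all", project_dir="logs")
    accelerator.init_trackers("mrpc_demo", {"lr": 2e-5, "num_epochs": 3})  # hypothetical run name/config
    accelerator.log({"train_loss": 0.31, "epoch": 0}, step=0)  # illustrative metric value
    accelerator.end_training()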
| 140 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
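# What the lazy module buys (a sketch; not executed here): importing the
# package does not import sentencepiece — the tokenizer submodule is only
# loaded when the attribute is first accessed.
def lazy_access_sketch():
    from transformers.models.bartpho import BartphoTokenizer  # triggers the deferred import

    return BartphoTokenizer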
| 15 | 0 |
import datasets
_CITATION = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
_DESCRIPTION = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_KWARGS_DESCRIPTION = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n    predictions: Predicted labels.\n    references: Ground truth labels.\nReturns:\n    \'accuracy\': accuracy\nExamples:\n\n    >>> predictions = [0, 1]\n    >>> references = [0, 1]\n    >>> xnli_metric = datasets.load_metric("xnli")\n    >>> results = xnli_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 207 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak a timm ViT checkpoint's weights into our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
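# Example (illustrative): the conversion can also be called directly from
# Python. The checkpoint name is the script's own default; the output path is
# an assumption for this sketch.
def _conversion_example():
    convert_vit_checkpoint("vit_base_patch16_224", "./vit-base-patch16-224")  # hypothetical output dir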
| 207 | 1 |
"""simple docstring"""
from __future__ import annotations
class IIRFilter:
    r"""
    N-order IIR filter. Assumes working with float samples normalised on [-1, 1].
    """

    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """Set the coefficients for the IIR filter; `a_0` may be left out, in which case it defaults to 1.0."""
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            error_message = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_message)

        if len(b_coeffs) != self.order + 1:
            error_message = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_message)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Calculate y[n], the filter output for input sample x[n]."""
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
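# Quick demonstration: with the default coefficients (a = b = [1.0, 0, ...])
# the filter is an identity, so every sample passes through unchanged. The
# input samples below are illustrative.
def _identity_filter_demo():
    filt = IIRFilter(2)
    samples = [0.0, 1.0, 0.5]
    assert [filt.process(x) for x in samples] == samples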
| 61 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    r"""
    Constructs a MusicGen processor which wraps an EnCodec feature extractor and a T5 tokenizer into a single
    processor class.
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
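# Usage sketch (inputs are illustrative assumptions): text goes to the
# tokenizer, raw audio to the feature extractor, and the two encodings are
# merged into a single batch. The 32 kHz rate is an assumed sampling rate.
def _processor_sketch(processor):
    import numpy as np

    audio = np.zeros(32_000, dtype=np.float32)  # 1 s of silence at the assumed 32 kHz rate
    return processor(text=["a calm piano melody"], audio=audio, sampling_rate=32_000, return_tensors="pt")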
| 156 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))
if __name__ == "__main__":
    phone = "0094702343221"
print(is_sri_lankan_phone_number(phone)) | 8 | 1 |
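# A few illustrative checks (the numbers are made up): the pattern accepts the
# 0094/+94/0 prefixes followed by a 7x mobile code and seven digits.
def _regex_examples():
    assert is_sri_lankan_phone_number("0094702343221")
    assert is_sri_lankan_phone_number("+94771234567")
    assert not is_sri_lankan_phone_number("1234567890")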
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from an ONNX model file and saves an optimized copy next to it.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
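# Usage sketch (the input file name is an assumption): this would write
# `optimized_model.onnx` next to the input file and return its path.
def _dedup_example():
    return remove_dup_initializers("model.onnx")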
| 21 |
import argparse
from collections import defaultdict
import yaml
_snake_case = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    """
    Cleans a section of the table of content by removing duplicates and sorting entries alphabetically,
    keeping any "overview" entry first.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")
    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
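# Programmatic sketch, equivalent to passing --fix_and_overwrite on the
# command line (calling in-place rewrites is the only behaviour shown above):
def _fix_toc_example():
    check_scheduler_doc(overwrite=True)
    check_pipeline_doc(overwrite=True)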
| 36 | 0 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sums the currently allocated resources, per resource column."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: the claim vector minus what is allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process remaining need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Maps each need list back to its original process index."""
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        """Runs the Banker's algorithm and reports whether the system is in a safe state."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Prints the allocated and maximum claim tables, plus current usage."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
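# Demonstration using the module-level test tables defined above; the
# `describe=True` keyword makes `main` print the tables before running.
def _bankers_demo():
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)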
| 308 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all n-grams (overlapping substrings of length `ngram_size`) of `sentence`."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
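# Worked example: the 3-grams of "hello" are its three overlapping length-3 slices.
def _ngram_example():
    assert create_ngram("hello", 3) == ["hel", "ell", "llo"]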
| 308 | 1 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def lowerCAmelCase (self : Tuple , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : Optional[int] , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : List[Any] , snake_case_ : Optional[int] , snake_case_ : Optional[int] , ):
__a : List[str] = True
__a : str = FalconModel(snake_case_ )
model.to(snake_case_ )
model.eval()
__a : str = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , )
__a : List[str] = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , )
__a : List[Any] = model(snake_case_ , attention_mask=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase (self : Union[str, Any] , snake_case_ : int , snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : List[Any] , snake_case_ : str , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : int , ):
__a : Any = FalconForCausalLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Tuple = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase (self : str , snake_case_ : int , snake_case_ : int , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : int , ):
__a : int = True
__a : Any = True
__a : Tuple = FalconForCausalLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
# first forward pass
__a : List[Any] = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , use_cache=snake_case_ , )
__a : Dict = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__a : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a : str = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__a : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
__a : int = torch.cat([input_mask, next_mask] , dim=-1 )
__a : Dict = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , output_hidden_states=snake_case_ , )['''hidden_states'''][0]
__a : List[Any] = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , past_key_values=snake_case_ , output_hidden_states=snake_case_ , )['''hidden_states'''][0]
# select random slice
__a : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
__a : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
def lowerCAmelCase (self : Optional[int] ):
__a , __a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__a : List[Any] = 3
__a : Optional[int] = input_dict['''input_ids''']
__a : List[Any] = input_ids.ne(1 ).to(snake_case_ )
__a : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__a : Union[str, Any] = FalconForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Any = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase (self : Tuple ):
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Optional[int] = 3
__a : List[Any] = '''single_label_classification'''
__a : Dict = input_dict['''input_ids''']
__a : Optional[int] = input_ids.ne(1 ).to(snake_case_ )
__a : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__a : Optional[Any] = FalconForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Optional[int] = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase (self : Union[str, Any] ):
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Dict = input_dict['''input_ids''']
__a : Optional[Any] = FalconForCausalLM(snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Optional[int] = model(snake_case_ , use_cache=snake_case_ )
__a : Tuple = input_ids.shape[0]
__a : Any = model._convert_to_rw_cache(result.past_key_values )
__a : List[Any] = model._convert_cache_to_standard_format(snake_case_ , snake_case_ )
for layer in range(len(snake_case_ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def lowerCAmelCase (self : Dict ):
__a , __a : int = self.model_tester.prepare_config_and_inputs_for_common()
__a : int = 3
__a : int = '''multi_label_classification'''
__a : Union[str, Any] = input_dict['''input_ids''']
__a : List[str] = input_ids.ne(1 ).to(snake_case_ )
__a : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__a : Union[str, Any] = FalconForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Union[str, Any] = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase (self : List[Any] ):
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(snake_case_ , '''use_cache''' ):
return
__a : Optional[Any] = model_class(snake_case_ ).to(snake_case_ )
if "use_cache" not in inputs:
__a : Optional[int] = True
__a : Any = model(**snake_case_ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
__a : Tuple = (
getattr(snake_case_ , '''decoder_layers''' , snake_case_ )
or getattr(snake_case_ , '''num_decoder_layers''' , snake_case_ )
or config.num_hidden_layers
)
__a : List[Any] = getattr(snake_case_ , '''num_kv_heads''' , config.num_attention_heads )
__a : Union[str, Any] = getattr(snake_case_ , '''d_model''' , config.hidden_size )
__a : int = embed_dim // num_attention_heads
__a : Tuple = outputs['''past_key_values''']
self.assertEqual(len(snake_case_ ) , snake_case_ )
__a , __a : Union[str, Any] = inputs['''input_ids'''].shape
for i in range(snake_case_ ):
if config.new_decoder_architecture:
__a : int = config.num_attention_heads
elif config.multi_query:
__a : Any = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
@slow
def lowerCAmelCase (self : Optional[int] ):
__a : int = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
__a : Union[str, Any] = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(snake_case_ )
__a : Dict = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(snake_case_ )
__a : List[str] = (
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
__a : Tuple = model.generate(**snake_case_ , do_sample=snake_case_ , max_new_tokens=1_9 )
__a : List[Any] = tokenizer.batch_decode(snake_case_ )[0]
self.assertEqual(snake_case_ , snake_case_ )
@slow
    def test_lm_generation_big_models(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)
@slow
    def test_lm_generation_use_cache(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
| 216 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    # check whether number is a perfect square
    sq = int(number**0.5)
    return number == sq * sq
def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    # add the three fractions x_num/x_den + y_num/y_den + z_num/z_den and reduce by the gcd
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
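
# Quick illustrative sanity check (not in the original): 1/2 + 1/3 + 1/6 reduces
# to 1/1, so add_three should return (1, 1).
assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)
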
def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
| 216 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        # the total number of transformer blocks is derived from the two layer groups
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs, )
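
# Illustrative usage (assumed, not from the original file): the total block count
# is derived rather than passed, so
#   config = GPTSanJapaneseConfig(num_switch_layers=10, num_ext_layers=2)
# yields config.num_layers == 12, and attribute_map lets the generic name
# config.num_hidden_layers resolve to that same value.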
| 98 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    # pre-trained on COCO, use a Swin backbone
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"])
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    # pop the value stored under the old key and re-insert it under the new key
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
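
# Illustrative shape note (hypothetical numbers, not from a real checkpoint): for
# dim = 96 the fused qkv matrix above has shape (288, 96); rows 0:96 become the
# query projection, rows 96:192 the key projection and rows 192:288 the value
# projection, which is exactly the split performed in read_in_swin_q_k_v.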
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]])
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
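
# Example invocation (hypothetical script name and paths, for illustration only):
#   python convert_maskformer_swin_to_pytorch.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade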
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you'd like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 98 | 1 |
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.3144598
def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
'''simple docstring'''
if temperature < 0:
raise Exception("""Temperature cannot be less than 0 K""" )
if molar_mass <= 0:
raise Exception("""Molar mass cannot be less than or equal to 0 kg/mol""" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 0.028  # molar mass of N2 in kg/mol (the formula expects kg/mol)
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f'Vrms of Nitrogen gas at 300 K is {vrms} m/s')
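    # Worked check (approximate): vrms = sqrt(3 * R * T / M)
    #   = sqrt(3 * 8.3144598 * 300 / 0.028) ≈ sqrt(267250) ≈ 517 m/s
    # for nitrogen at 300 K, matching the value printed above.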
| 229 |
'''simple docstring'''
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # each equation is [a, b, c] for a*x + b*y = c
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("""Please enter a valid equation.""")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("""Both a & b of two equations can't be zero.""")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("""Infinite solutions. (Consistent system)""")
        else:
            raise ValueError("""No solution. (Inconsistent system)""")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
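
# Worked example (illustrative): for x + 2y = 3 and 2x + y = 3 the determinants
# are det = 1*1 - 2*2 = -3, det_x = 3*1 - 3*2 = -3 and det_y = 1*3 - 2*3 = -3,
# so Cramer's rule gives x = y = 1.
assert cramers_rule_2x2([1, 2, 3], [2, 1, 3]) == (1.0, 1.0)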
| 229 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(output, t, sample, **kwargs).prev_sample
                new_output = new_scheduler.step(new_output, t, sample, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with the same config names gives the same results
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, solver_order=order, solver_type=solver_type, )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order, solver_type=solver_type, prediction_type=prediction_type, )
                    sample = self.full_loop(
                        solver_order=order, solver_type=solver_type, prediction_type=prediction_type, )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
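
# Illustrative config round-trip (assumes the standard diffusers scheduler API),
# mirroring what test_switch exercises above:
#   unipc = UniPCMultistepScheduler(num_train_timesteps=1000)
#   dpm = DPMSolverMultistepScheduler.from_config(unipc.config)
#   unipc_again = UniPCMultistepScheduler.from_config(dpm.config)
# Schedulers sharing a config schema can be swapped freely this way.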
| 371 |
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """simple docstring"""
    if principal <= 0:
        raise Exception('''Principal borrowed must be > 0''')
    if rate_per_annum < 0:
        raise Exception('''Rate of interest must be >= 0''')
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception('''Years to repay must be an integer > 0''')

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
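
# Worked example (approximate, for illustration): borrowing 100000 at 10% p.a.
# over 2 years gives rate_per_month = 0.10 / 12 and 24 monthly payments, so
# equated_monthly_installments(100000, 0.10, 2) comes out to roughly 4614.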
if __name__ == "__main__":
import doctest
doctest.testmod()
| 51 | 0 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"""Saving tokenizer to {pytorch_dump_folder_path}""")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"""encoder.layer.{layer_index}.attention.self."""
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"""Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids""")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f""" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}""")

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]])
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""")
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
            f""" {expected_shape}""")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
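
# Background note on the three copied query matrices above (informal): LUKE's
# entity-aware self-attention uses separate query projections for word-to-entity
# (w2e), entity-to-word (e2w) and entity-to-entity (e2e) attention, and all three
# start from the pretrained word-to-word query so fine-tuning begins from
# standard self-attention behaviour.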
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab
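
# The entity vocab is a plain TSV whose line index becomes the entity id; only
# the first column is used. Hypothetical first lines, for illustration:
#   [PAD]<TAB>0
#   [UNK]<TAB>0
#   [MASK]<TAB>0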
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 54 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Dict = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
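
# Illustrative effect of the lazy-module pattern above (informal): importing this
# package is cheap because _LazyModule resolves a symbol, and thereby its
# torch/TF/Flax dependency, only on first attribute access.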
| 109 | 0 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_lowerCAmelCase = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
_lowerCAmelCase = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
_lowerCAmelCase = r"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
return {
"accuracy": accuracy,
}
| 98 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {'''height''': 20, '''width''': 20}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset('''hf-internal-testing/fixtures_ade20k''', split='''test''')

    image = Image.open(dataset[0]['''file'''])
    map = Image.open(dataset[1]['''file'''])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset('''hf-internal-testing/fixtures_ade20k''', split='''test''')

    image1 = Image.open(ds[0]['''file'''])
    map1 = Image.open(ds[1]['''file'''])
    image2 = Image.open(ds[2]['''file'''])
    map2 = Image.open(ds[3]['''file'''])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''size'''))
        self.assertTrue(hasattr(image_processing, '''do_center_crop'''))
        self.assertTrue(hasattr(image_processing, '''center_crop'''))
        self.assertTrue(hasattr(image_processing, '''do_normalize'''))
        self.assertTrue(hasattr(image_processing, '''image_mean'''))
        self.assertTrue(hasattr(image_processing, '''image_std'''))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''height''': 20, '''width''': 20})
        self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True)
        self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42})
        self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
__magic_name__ = image_processing(a__ , a__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
__magic_name__ , __magic_name__ = prepare_semantic_single_inputs()
__magic_name__ = image_processing(a__ , a__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
__magic_name__ , __magic_name__ = prepare_semantic_batch_inputs()
__magic_name__ = image_processing(a__ , a__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
def snake_case__ ( self : Any ):
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__magic_name__ , __magic_name__ = prepare_semantic_single_inputs()
__magic_name__ = image_processing(a__ , a__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 150 )
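        # with label reduction enabled (toggled below), the background class 0 is remapped
        # to the ignore index 255 and the remaining labels are shifted down by one, which
        # is why the maximum may legitimately reach 255 in the assertions that follow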
        image_processing.do_reduce_labels = True
__magic_name__ = image_processing(a__ , a__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
| 98 | 1 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
__UpperCAmelCase : Any = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
__UpperCAmelCase : Any = "zero2"
__UpperCAmelCase : Tuple = "zero3"
__UpperCAmelCase : Dict = [ZEROa, ZEROa]
def custom_name_func ( func , param_num , param ) -> str:
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("""_""".join(str(x) for x in param.args))
return F'''{func.__name__}_{param_based_name}'''
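# e.g. for param.args == ("zero2", "base"), a decorated test named "test_foo" gets the
# sub-test name "test_foo_zero2_base" ("test_foo" is illustrative, not a real method here)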
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
@parameterized.expand(A , name_func=A )
def UpperCAmelCase__ ( self : int , A : List[str] , A : List[str] ):
self.run_and_check(
stage=A , model=A , distributed=A , fpaa=A , )
@require_torch_multi_gpu
@parameterized.expand(A , name_func=A )
def UpperCAmelCase__ ( self : List[Any] , A : Union[str, Any] , A : Optional[int] ):
self.run_and_check(
stage=A , model=A , distributed=A , fpaa=A , )
@parameterized.expand(A , name_func=A )
def UpperCAmelCase__ ( self : int , A : Optional[int] , A : List[str] ):
self.run_and_check(
stage=A , model=A , distributed=A , fpaa=A , )
@require_torch_multi_gpu
@parameterized.expand(A , name_func=A )
def UpperCAmelCase__ ( self : Dict , A : Dict , A : Optional[Any] ):
self.run_and_check(
stage=A , model=A , distributed=A , fpaa=A , )
def UpperCAmelCase__ ( self : Union[str, Any] , A : Optional[int] ):
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def UpperCAmelCase__ ( self : List[str] , A : str , A : str , A : int = 10 , A : bool = True , A : bool = True , A : bool = True , ):
__snake_case: Optional[Any] = models[model]
__snake_case: Optional[Any] = self.run_trainer(
stage=A , model_name=A , eval_steps=A , num_train_epochs=1 , distributed=A , fpaa=A , )
self.do_checks(A )
return output_dir
def UpperCAmelCase__ ( self : str , A : str , A : str , A : int = 10 , A : int = 1 , A : bool = True , A : bool = True , ):
__snake_case: Union[str, Any] = self.get_auto_remove_tmp_dir("""./xxx""" , after=A )
__snake_case: int = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(A )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
if fpaa:
args.extend(["""--fp16"""] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
__snake_case: Optional[int] = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
__snake_case: List[Any] = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
__snake_case: List[Any] = self.get_launcher(A )
__snake_case: str = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(A , env=self.get_env() )
return output_dir
def UpperCAmelCase__ ( self : Optional[Any] , A : Any=False ):
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        #    results with more gpus because we use very little data)
__snake_case: Any = min(2 , get_gpu_count() ) if distributed else 1
return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
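        # e.g. with distributed=True on a machine with at least 2 GPUs this returns
        # ["deepspeed", "--num_nodes", "1", "--num_gpus", "2"], which run_trainer
        # prepends to the script path and its arguments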
| 111 |
import math
def is_prime ( number ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # all primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
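# e.g. is_prime(29) -> True: the loop makes a single pass with i = 5 and checks the
# 6k +/- 1 candidates 5 and 7, neither of which divides 29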
def solution ( nth = 1_0001 ) -> int:
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError("""Parameter nth must be int or castable to int.""") from None
    if nth <= 0:
        raise ValueError("""Parameter nth must be greater than or equal to one.""")
    primes: list[int] = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]
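# e.g. solution(6) == 13, since the first six primes are 2, 3, 5, 7, 11 and 13;
# the default of 10001 answers Project Euler problem 7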
if __name__ == "__main__":
print(f'{solution() = }')
| 111 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline ( DiffusionPipeline ):
    unet: UNetaDModel
    scheduler: KarrasVeScheduler
    def __init__( self , unet , scheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 50 , generator = None , output_type = "pil" , return_dict = True , **kwargs , ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape , generator=generator , device=self.device ) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat , sigma_hat = self.scheduler.add_noise_to_input(sample , sigma , generator=generator )
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output , sigma_hat , sigma_prev , sample_hat )
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
                step_output = self.scheduler.step_correct(
                    model_output , sigma_hat , sigma_prev , sample_hat , step_output.prev_sample , step_output['''derivative'''] , )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0 , 1 )
        image = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
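# Minimal usage sketch (assumptions: a UNet2DModel checkpoint trained for Karras VE
# sampling exists locally; the path is illustrative, not a published checkpoint):
#   unet = UNetaDModel.from_pretrained("./karras-ve-unet")
#   pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]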
| 281 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 281 | 1 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple ( x ):
'''simple docstring'''
if isinstance(lowerCAmelCase__ , collections.abc.Iterable ):
return x
return (x, x)
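# e.g. to_atuple(224) -> (224, 224); an iterable such as (224, 224) is returned
# unchanged (used below to normalize image_size / patch_size configuration values)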
@require_flax
class lowercase :
def A__ ( self ,A__ ,A__):
pass
def A__ ( self):
pass
def A__ ( self):
pass
    def assert_almost_equals ( self ,a ,b ,tol):
        diff = np.abs((a - b)).max()
        self.assertLessEqual(diff ,tol ,f'Difference between torch and flax is {diff} (>= {tol}).')
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__=None ,**A__):
lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(A__ ,A__)
lowercase = FlaxVisionTextDualEncoderModel(A__)
lowercase = model(input_ids=A__ ,pixel_values=A__ ,attention_mask=A__)
self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], config.projection_dim))
self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], config.projection_dim))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__=None ,**A__):
lowercase , lowercase = self.get_vision_text_model(A__ ,A__)
lowercase = {'''vision_model''': vision_model, '''text_model''': text_model}
lowercase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**A__)
lowercase = model(input_ids=A__ ,pixel_values=A__ ,attention_mask=A__)
self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], model.config.projection_dim))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__=None ,**A__):
lowercase , lowercase = self.get_vision_text_model(A__ ,A__)
lowercase = {'''vision_model''': vision_model, '''text_model''': text_model}
lowercase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**A__)
lowercase = model(input_ids=A__ ,pixel_values=A__ ,attention_mask=A__)
lowercase = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A__)
lowercase = FlaxVisionTextDualEncoderModel.from_pretrained(A__)
lowercase = model(input_ids=A__ ,pixel_values=A__ ,attention_mask=A__)
lowercase = after_output[0]
lowercase = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(A__ ,1E-3)
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__=None ,**A__):
lowercase , lowercase = self.get_vision_text_model(A__ ,A__)
lowercase = {'''vision_model''': vision_model, '''text_model''': text_model}
lowercase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**A__)
lowercase = model(
input_ids=A__ ,pixel_values=A__ ,attention_mask=A__ ,output_attentions=A__)
lowercase = output.vision_model_output.attentions
self.assertEqual(len(A__) ,vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase = to_atuple(vision_model.config.image_size)
lowercase = to_atuple(vision_model.config.patch_size)
lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowercase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len))
lowercase = output.text_model_output.attentions
self.assertEqual(len(A__) ,text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] ,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,)
def A__ ( self ,A__ ,A__ ,A__):
pt_model.to(A__)
pt_model.eval()
# prepare inputs
lowercase = inputs_dict
lowercase = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
with torch.no_grad():
lowercase = pt_model(**A__).to_tuple()
lowercase = fx_model(**A__).to_tuple()
self.assertEqual(len(A__) ,len(A__) ,'''Output lengths differ between Flax and PyTorch''')
for fx_output, pt_output in zip(fx_outputs[:4] ,pt_outputs[:4]):
self.assert_almost_equals(A__ ,pt_output.numpy() ,4E-2)
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(A__)
lowercase = FlaxVisionTextDualEncoderModel.from_pretrained(A__ ,from_pt=A__)
lowercase = fx_model_loaded(**A__).to_tuple()
self.assertEqual(len(A__) ,len(A__) ,'''Output lengths differ between Flax and PyTorch''')
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] ,pt_outputs[:4]):
self.assert_almost_equals(A__ ,pt_output.numpy() ,4E-2)
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(A__)
lowercase = VisionTextDualEncoderModel.from_pretrained(A__ ,from_flax=A__)
pt_model_loaded.to(A__)
pt_model_loaded.eval()
with torch.no_grad():
lowercase = pt_model_loaded(**A__).to_tuple()
self.assertEqual(len(A__) ,len(A__) ,'''Output lengths differ between Flax and PyTorch''')
for fx_output, pt_output_loaded in zip(fx_outputs[:4] ,pt_outputs_loaded[:4]):
self.assert_almost_equals(A__ ,pt_output_loaded.numpy() ,4E-2)
def A__ ( self ,A__ ,A__ ,A__):
lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(A__ ,A__)
lowercase = VisionTextDualEncoderModel(A__)
lowercase = FlaxVisionTextDualEncoderModel(A__)
lowercase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() ,A__)
lowercase = fx_state
self.check_pt_flax_equivalence(A__ ,A__ ,A__)
def A__ ( self ,A__ ,A__ ,A__):
lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(A__ ,A__)
lowercase = VisionTextDualEncoderModel(A__)
lowercase = FlaxVisionTextDualEncoderModel(A__)
lowercase = load_flax_weights_in_pytorch_model(A__ ,fx_model.params)
self.check_pt_flax_equivalence(A__ ,A__ ,A__)
def A__ ( self):
lowercase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**A__)
def A__ ( self):
lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**A__)
def A__ ( self):
lowercase = self.prepare_config_and_inputs()
self.check_save_load(**A__)
def A__ ( self):
lowercase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**A__)
@is_pt_flax_cross_test
def A__ ( self):
lowercase = self.prepare_config_and_inputs()
lowercase = config_inputs_dict.pop('''vision_config''')
lowercase = config_inputs_dict.pop('''text_config''')
lowercase = config_inputs_dict
self.check_equivalence_pt_to_flax(A__ ,A__ ,A__)
self.check_equivalence_flax_to_pt(A__ ,A__ ,A__)
@slow
def A__ ( self):
lowercase , lowercase = self.get_pretrained_model_and_inputs()
lowercase = model_a(**A__)
lowercase = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(A__)
lowercase = FlaxVisionTextDualEncoderModel.from_pretrained(A__)
lowercase = model_a(**A__)
lowercase = after_outputs[0]
lowercase = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(A__ ,1E-5)
@require_flax
class lowercase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
def A__ ( self):
lowercase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' ,'''hf-internal-testing/tiny-bert''' ,vision_from_pt=A__ ,text_from_pt=A__ ,)
lowercase = 1_3
lowercase = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
lowercase = ids_tensor([batch_size, 4] ,model.config.text_config.vocab_size)
lowercase = random_attention_mask([batch_size, 4])
lowercase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def A__ ( self ,A__ ,A__):
lowercase = FlaxViTModel(A__)
lowercase = FlaxBertModel(A__)
return vision_model, text_model
def A__ ( self):
lowercase = FlaxViTModelTester(self)
lowercase = FlaxBertModelTester(self)
lowercase = vit_model_tester.prepare_config_and_inputs()
lowercase = bert_model_tester.prepare_config_and_inputs()
lowercase , lowercase = vision_config_and_inputs
lowercase , lowercase , lowercase , lowercase = text_config_and_inputs
        # bundle the configs and inputs consumed by the shared dual-encoder tests
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class lowercase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
def A__ ( self):
lowercase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''' ,'''hf-internal-testing/tiny-bert''' ,vision_from_pt=A__ ,text_from_pt=A__ ,)
lowercase = 1_3
lowercase = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
lowercase = ids_tensor([batch_size, 4] ,model.config.text_config.vocab_size)
lowercase = random_attention_mask([batch_size, 4])
lowercase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def A__ ( self ,A__ ,A__):
lowercase = FlaxCLIPVisionModel(A__)
lowercase = FlaxBertModel(A__)
return vision_model, text_model
def A__ ( self):
lowercase = FlaxCLIPVisionModelTester(self)
lowercase = FlaxBertModelTester(self)
lowercase = clip_model_tester.prepare_config_and_inputs()
lowercase = bert_model_tester.prepare_config_and_inputs()
lowercase , lowercase = vision_config_and_inputs
lowercase , lowercase , lowercase , lowercase = text_config_and_inputs
        # bundle the configs and inputs consumed by the shared dual-encoder tests
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class lowercase ( unittest.TestCase ):
@slow
def A__ ( self):
lowercase = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' ,logit_scale_init_value=1.0)
lowercase = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''')
lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
lowercase = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] ,images=A__ ,padding=A__ ,return_tensors='''np''')
lowercase = model(**A__)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape ,(inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape ,(inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) ,)
lowercase = np.array([[1.2284727, 0.3104122]])
self.assertTrue(np.allclose(outputs.logits_per_image ,A__ ,atol=1E-3))
| 101 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase (self , UpperCAmelCase ) -> Union[str, Any]:
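        # `results` is nested as {model: {"bs": [...], "ss": [...], "result": {bs: {ss: measurement}}}},
        # so every (batch_size, sequence_length) pair must carry a recorded measurement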
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
_snake_case = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(UpperCAmelCase )
def lowercase (self ) -> Optional[int]:
_snake_case = """sshleifer/tiny-gpt2"""
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
_snake_case = PyTorchBenchmark(UpperCAmelCase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase (self ) -> Dict:
_snake_case = """sgugger/tiny-distilbert-classification"""
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , only_pretrain_model=UpperCAmelCase , )
_snake_case = PyTorchBenchmark(UpperCAmelCase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase (self ) -> Optional[Any]:
_snake_case = """sshleifer/tiny-gpt2"""
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , torchscript=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
_snake_case = PyTorchBenchmark(UpperCAmelCase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def lowercase (self ) -> Optional[int]:
_snake_case = """sshleifer/tiny-gpt2"""
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , fpaa=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
_snake_case = PyTorchBenchmark(UpperCAmelCase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase (self ) -> Union[str, Any]:
_snake_case = """sshleifer/tiny-gpt2"""
_snake_case = AutoConfig.from_pretrained(UpperCAmelCase )
# set architectures equal to `None`
_snake_case = None
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
_snake_case = PyTorchBenchmark(UpperCAmelCase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase (self ) -> Optional[int]:
_snake_case = """sshleifer/tiny-gpt2"""
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
_snake_case = PyTorchBenchmark(UpperCAmelCase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def lowercase (self ) -> Tuple:
_snake_case = """sshleifer/tiny-gpt2"""
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=UpperCAmelCase , multi_process=UpperCAmelCase , )
_snake_case = PyTorchBenchmark(UpperCAmelCase )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase (self ) -> Union[str, Any]:
_snake_case = """sshleifer/tiny-gpt2"""
_snake_case = AutoConfig.from_pretrained(UpperCAmelCase )
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
_snake_case = PyTorchBenchmark(UpperCAmelCase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase (self ) -> Dict:
_snake_case = """sshleifer/tinier_bart"""
_snake_case = AutoConfig.from_pretrained(UpperCAmelCase )
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
_snake_case = PyTorchBenchmark(UpperCAmelCase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase (self ) -> Any:
_snake_case = """sshleifer/tiny-gpt2"""
_snake_case = AutoConfig.from_pretrained(UpperCAmelCase )
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
_snake_case = PyTorchBenchmark(UpperCAmelCase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase (self ) -> int:
_snake_case = """sshleifer/tinier_bart"""
_snake_case = AutoConfig.from_pretrained(UpperCAmelCase )
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
_snake_case = PyTorchBenchmark(UpperCAmelCase , configs=[config] )
_snake_case = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase (self ) -> str:
_snake_case = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , save_to_csv=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCAmelCase , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(UpperCAmelCase , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(UpperCAmelCase , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(UpperCAmelCase , """train_time.csv""" ) , env_info_csv_file=os.path.join(UpperCAmelCase , """env.csv""" ) , multi_process=UpperCAmelCase , )
_snake_case = PyTorchBenchmark(UpperCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(UpperCAmelCase , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCAmelCase , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCAmelCase , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCAmelCase , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCAmelCase , """env.csv""" ) ).exists() )
def lowercase (self ) -> int:
_snake_case = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(UpperCAmelCase ):
self.assertTrue(hasattr(UpperCAmelCase , """sequential""" ) )
self.assertTrue(hasattr(UpperCAmelCase , """cumulative""" ) )
self.assertTrue(hasattr(UpperCAmelCase , """current""" ) )
self.assertTrue(hasattr(UpperCAmelCase , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_snake_case = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCAmelCase , """log.txt""" ) , log_print=UpperCAmelCase , trace_memory_line_by_line=UpperCAmelCase , multi_process=UpperCAmelCase , )
_snake_case = PyTorchBenchmark(UpperCAmelCase )
_snake_case = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(UpperCAmelCase , """log.txt""" ) ).exists() ) | 341 | 0 |
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping ( key , file ) -> str:
    layer_rename_map = {
        '''word_embeddings.weight''': '''word_embeddings.weight''',
        '''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
        '''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
        '''weight''': '''ln_f.weight''',
        '''bias''': '''ln_f.bias''',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(r'''.*layer_(\d*).*''' , file )[1] )
    layer_number -= 3
    return f'''h.{layer_number}.''' + key
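# e.g. a key "self_attention.dense.weight" read from a shard named
# "layer_05-model_00-model_states.pt" is renamed to "h.2.self_attention.dense.weight"
# (the offset of 3 skips the embedding-related "layer" files that precede the blocks)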
def get_dtype_size ( dtype ) -> float:
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r'''[^\d](\d+)$''' , str(dtype ) )
    if bit_search is None:
        raise ValueError(f'''`dtype` is not a valid dtype: {dtype}.''' )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
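# e.g. get_dtype_size(torch.float16) -> 2, while get_dtype_size(torch.bool) -> 0.125 (1/8 byte)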
def __lowerCamelCase ( a_ : List[str] , a_ : str , a_ : Any , a_ : Any , a_ : List[str] ) -> int:
# Construct model
if bloom_config_file == "":
__SCREAMING_SNAKE_CASE :Optional[int] = BloomConfig()
else:
__SCREAMING_SNAKE_CASE :Tuple = BloomConfig.from_json_file(a_ )
if shard_model:
__SCREAMING_SNAKE_CASE :Optional[int] = os.listdir(a_ )
        __SCREAMING_SNAKE_CASE :Optional[int] = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , a_ ) )
__SCREAMING_SNAKE_CASE :Any = {'''weight_map''': {}, '''metadata''': {}}
__SCREAMING_SNAKE_CASE :Optional[int] = 0
__SCREAMING_SNAKE_CASE :Optional[Any] = None
__SCREAMING_SNAKE_CASE :Optional[int] = BloomConfig()
for j, file in enumerate(a_ ):
print('''Processing file: {}'''.format(a_ ) )
__SCREAMING_SNAKE_CASE :Dict = None
for i in range(a_ ):
# load all TP files
__SCREAMING_SNAKE_CASE :Optional[int] = file.replace('''model_00''' , f'''model_0{i}''' )
__SCREAMING_SNAKE_CASE :Optional[int] = torch.load(os.path.join(a_ , a_ ) , map_location='''cpu''' )
# Rename keys in the transformers names
__SCREAMING_SNAKE_CASE :List[str] = list(temp.keys() )
for key in keys:
__SCREAMING_SNAKE_CASE :Any = temp.pop(a_ )
if tensors is None:
__SCREAMING_SNAKE_CASE :int = temp
else:
for key in tensors.keys():
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__SCREAMING_SNAKE_CASE :Optional[int] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
__SCREAMING_SNAKE_CASE :Optional[int] = torch.cat([tensors[key], temp[key]] , dim=a_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__SCREAMING_SNAKE_CASE :List[Any] = tensors[key] / pretraining_tp
torch.save(
a_ , os.path.join(
a_ , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(a_ ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
__SCREAMING_SNAKE_CASE :Any = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
__SCREAMING_SNAKE_CASE :Tuple = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(a_ ) ).zfill(5 ) )
__SCREAMING_SNAKE_CASE :Optional[int] = BloomConfig()
__SCREAMING_SNAKE_CASE :Optional[int] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
__SCREAMING_SNAKE_CASE :str = total_size
with open(a_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(a_ , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
__SCREAMING_SNAKE_CASE :str = json.dumps(a_ , indent=2 , sort_keys=a_ ) + '''\n'''
f.write(a_ )
else:
__SCREAMING_SNAKE_CASE :Optional[int] = BloomModel(a_ )
__SCREAMING_SNAKE_CASE :Union[str, Any] = os.listdir(a_ )
        __SCREAMING_SNAKE_CASE :List[Any] = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , a_ ) )
__SCREAMING_SNAKE_CASE :Optional[Any] = None
for i, file in enumerate(a_ ):
__SCREAMING_SNAKE_CASE :Dict = None
for i in range(a_ ):
# load all TP files
__SCREAMING_SNAKE_CASE :List[Any] = file.replace('''model_00''' , f'''model_0{i}''' )
__SCREAMING_SNAKE_CASE :Dict = torch.load(os.path.join(a_ , a_ ) , map_location='''cpu''' )
# Rename keys in the transformers names
__SCREAMING_SNAKE_CASE :List[Any] = list(temp.keys() )
for key in keys:
__SCREAMING_SNAKE_CASE :List[Any] = temp.pop(a_ )
if tensors is None:
__SCREAMING_SNAKE_CASE :Optional[int] = temp
else:
for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__SCREAMING_SNAKE_CASE :Dict = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
__SCREAMING_SNAKE_CASE :str = torch.cat([tensors[key], temp[key]] , dim=a_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
            if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__SCREAMING_SNAKE_CASE :Any = tensors[key] / pretraining_tp
__SCREAMING_SNAKE_CASE :Optional[Any] = model.load_state_dict(a_ , strict=a_ )
assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
__SCREAMING_SNAKE_CASE :Optional[Any] = set(other_keys.missing_keys )
else:
__SCREAMING_SNAKE_CASE :Dict = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(a_ , exist_ok=a_ )
__SCREAMING_SNAKE_CASE :Dict = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
__SCREAMING_SNAKE_CASE :Optional[int] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
__SCREAMING_SNAKE_CASE :Dict = model.to(config.torch_dtype )
torch.save(model.state_dict() , a_ )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(a_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
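# Example invocation (paths illustrative; the script name follows the usual transformers
# conversion-script convention and is an assumption):
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /path/to/megatron_checkpoint \
#       --pytorch_dump_folder_path ./bloom-hf \
#       --shard_model --pretraining_tp 4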
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
lowerCamelCase_ = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
    )
| 239 |
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class _SCREAMING_SNAKE_CASE( PretrainedConfig ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''xlm-prophetnet'''
SCREAMING_SNAKE_CASE_ : Any = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'''num_attention_heads''': '''num_encoder_attention_heads''',
}
def __init__( self ,SCREAMING_SNAKE_CASE__ = 0.1 ,SCREAMING_SNAKE_CASE__ = "gelu" ,SCREAMING_SNAKE_CASE__ = 3_05_22 ,SCREAMING_SNAKE_CASE__ = 10_24 ,SCREAMING_SNAKE_CASE__ = 40_96 ,SCREAMING_SNAKE_CASE__ = 12 ,SCREAMING_SNAKE_CASE__ = 16 ,SCREAMING_SNAKE_CASE__ = 40_96 ,SCREAMING_SNAKE_CASE__ = 12 ,SCREAMING_SNAKE_CASE__ = 16 ,SCREAMING_SNAKE_CASE__ = 0.1 ,SCREAMING_SNAKE_CASE__ = 0.1 ,SCREAMING_SNAKE_CASE__ = 5_12 ,SCREAMING_SNAKE_CASE__ = 0.0_2 ,SCREAMING_SNAKE_CASE__ = True ,SCREAMING_SNAKE_CASE__ = True ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = 2 ,SCREAMING_SNAKE_CASE__ = 32 ,SCREAMING_SNAKE_CASE__ = 1_28 ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = 0.0 ,SCREAMING_SNAKE_CASE__ = True ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = 1 ,SCREAMING_SNAKE_CASE__ = 2 ,**SCREAMING_SNAKE_CASE__ ,) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = vocab_size
__SCREAMING_SNAKE_CASE :Tuple = hidden_size
__SCREAMING_SNAKE_CASE :Optional[int] = encoder_ffn_dim
__SCREAMING_SNAKE_CASE :Optional[int] = num_encoder_layers
__SCREAMING_SNAKE_CASE :Tuple = num_encoder_attention_heads
__SCREAMING_SNAKE_CASE :List[Any] = decoder_ffn_dim
__SCREAMING_SNAKE_CASE :Union[str, Any] = num_decoder_layers
__SCREAMING_SNAKE_CASE :Optional[Any] = num_decoder_attention_heads
__SCREAMING_SNAKE_CASE :List[str] = max_position_embeddings
__SCREAMING_SNAKE_CASE :Dict = init_std # Normal(0, this parameter)
__SCREAMING_SNAKE_CASE :List[Any] = activation_function
# parameters for xlmprophetnet
__SCREAMING_SNAKE_CASE :Tuple = ngram
__SCREAMING_SNAKE_CASE :int = num_buckets
__SCREAMING_SNAKE_CASE :Optional[int] = relative_max_distance
__SCREAMING_SNAKE_CASE :Union[str, Any] = disable_ngram_loss
__SCREAMING_SNAKE_CASE :Dict = eps
# 3 Types of Dropout
__SCREAMING_SNAKE_CASE :List[str] = attention_dropout
__SCREAMING_SNAKE_CASE :Dict = activation_dropout
__SCREAMING_SNAKE_CASE :Union[str, Any] = dropout
__SCREAMING_SNAKE_CASE :int = use_cache
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ ,bos_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,is_encoder_decoder=SCREAMING_SNAKE_CASE__ ,add_cross_attention=SCREAMING_SNAKE_CASE__ ,decoder_start_token_id=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
@property
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.num_encoder_layers + self.num_decoder_layers
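    # e.g. with the defaults above (12 encoder + 12 decoder layers) the property returns 24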
@num_hidden_layers.setter
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
            ''' `num_decoder_layers`.''' )
| 239 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
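    # replace this module in sys.modules with a _LazyModule so that the torch-backed
    # symbols listed in _import_structure are only imported on first attribute access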
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 299 |
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=DummyObject ):
"""simple docstring"""
UpperCAmelCase_ =["torch", "scipy"]
def __init__( self , *_A , **_A ) -> Tuple:
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def _UpperCamelCase ( cls , *_A , **_A ) -> Any:
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def _UpperCamelCase ( cls , *_A , **_A ) -> Tuple:
requires_backends(cls , ['''torch''', '''scipy'''] )
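    # any use of this placeholder class (instantiation or the classmethods above) raises
    # an error message instructing the user to install the missing torch / scipy backends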
| 299 | 1 |
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase_ : Optional[int] = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCAmelCase_ : Any = logging.get_logger(__name__)
class __lowerCAmelCase ( PretrainedConfig ):
snake_case : Tuple = """maskformer"""
snake_case : Dict = {"""hidden_size""": """mask_feature_size"""}
snake_case : Union[str, Any] = ["""resnet""", """swin"""]
snake_case : Optional[Any] = ["""detr"""]
def __init__(self , lowerCAmelCase__ = 2_5_6 , lowerCAmelCase__ = 2_5_6 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 0.0_2 , lowerCAmelCase__ = 1.0 , lowerCAmelCase__ = 1.0 , lowerCAmelCase__ = 1.0 , lowerCAmelCase__ = 2_0.0 , lowerCAmelCase__ = None , **lowerCAmelCase__ , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
_UpperCAmelCase : Optional[int] = SwinConfig(
image_size=3_8_4 , in_channels=3 , patch_size=4 , embed_dim=1_2_8 , depths=[2, 2, 1_8, 2] , num_heads=[4, 8, 1_6, 3_2] , window_size=1_2 , drop_path_rate=0.3 , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : Tuple = backbone_config.pop("""model_type""" )
_UpperCAmelCase : int = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase : List[str] = config_class.from_dict(lowerCAmelCase__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
F"Supported model types: {','.join(self.backbones_supported )}" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
_UpperCAmelCase : str = DetrConfig()
else:
# verify that the decoder is supported
_UpperCAmelCase : Optional[int] = (
decoder_config.pop("""model_type""" ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F"Transformer Decoder {decoder_type} not supported, please use one of"
F" {','.join(self.decoders_supported )}" )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : Dict = CONFIG_MAPPING[decoder_type]
_UpperCAmelCase : Union[str, Any] = config_class.from_dict(lowerCAmelCase__ )
_UpperCAmelCase : Any = backbone_config
_UpperCAmelCase : Optional[int] = decoder_config
# main feature dimension for the model
_UpperCAmelCase : Union[str, Any] = fpn_feature_size
_UpperCAmelCase : List[Any] = mask_feature_size
# initializer
_UpperCAmelCase : List[Any] = init_std
_UpperCAmelCase : int = init_xavier_std
# Hungarian matcher && loss
_UpperCAmelCase : List[str] = cross_entropy_weight
_UpperCAmelCase : Union[str, Any] = dice_weight
_UpperCAmelCase : Dict = mask_weight
_UpperCAmelCase : int = use_auxiliary_loss
_UpperCAmelCase : Optional[int] = no_object_weight
_UpperCAmelCase : Any = output_auxiliary_logits
_UpperCAmelCase : List[str] = self.decoder_config.encoder_attention_heads
_UpperCAmelCase : Union[str, Any] = self.decoder_config.num_hidden_layers
super().__init__(**lowerCAmelCase__ )
@classmethod
def snake_case_ (cls , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ):
return cls(
backbone_config=lowerCAmelCase__ , decoder_config=lowerCAmelCase__ , **lowerCAmelCase__ , )
def snake_case_ (self ):
_UpperCAmelCase : Dict = copy.deepcopy(self.__dict__ )
_UpperCAmelCase : List[str] = self.backbone_config.to_dict()
_UpperCAmelCase : str = self.decoder_config.to_dict()
_UpperCAmelCase : Any = self.__class__.model_type
return output
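# Usage sketch (hedged; "MaskFormerConfig" is the upstream name this config mirrors):
#   config = MaskFormerConfig()            # falls back to the Swin backbone + DETR decoder
#   config.backbone_config.model_type      # -> "swin"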
| 358 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : Optional[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase_ : Tuple = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase_ : Tuple = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class __lowerCAmelCase ( PreTrainedTokenizerFast ):
snake_case : Optional[Any] = VOCAB_FILES_NAMES
snake_case : Dict = PRETRAINED_VOCAB_FILES_MAP
snake_case : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case : str = ["""input_ids""", """attention_mask"""]
snake_case : List[str] = RobertaTokenizer
def __init__(self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="replace" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=False , lowerCAmelCase__=True , **lowerCAmelCase__ , ):
super().__init__(
lowerCAmelCase__ , lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ , **lowerCAmelCase__ , )
_UpperCAmelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowerCAmelCase__ ) != add_prefix_space:
_UpperCAmelCase : Tuple = getattr(lowerCAmelCase__ , pre_tok_state.pop("""type""" ) )
_UpperCAmelCase : Any = add_prefix_space
_UpperCAmelCase : List[Any] = pre_tok_class(**lowerCAmelCase__ )
_UpperCAmelCase : Dict = add_prefix_space
_UpperCAmelCase : int = """post_processor"""
_UpperCAmelCase : Any = getattr(self.backend_tokenizer , lowerCAmelCase__ , lowerCAmelCase__ )
if tokenizer_component_instance:
_UpperCAmelCase : str = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_UpperCAmelCase : Any = tuple(state["""sep"""] )
if "cls" in state:
_UpperCAmelCase : Tuple = tuple(state["""cls"""] )
_UpperCAmelCase : Dict = False
if state.get("""add_prefix_space""" , lowerCAmelCase__ ) != add_prefix_space:
_UpperCAmelCase : List[str] = add_prefix_space
_UpperCAmelCase : Dict = True
if state.get("""trim_offsets""" , lowerCAmelCase__ ) != trim_offsets:
_UpperCAmelCase : Tuple = trim_offsets
_UpperCAmelCase : List[str] = True
if changes_to_apply:
_UpperCAmelCase : Dict = getattr(lowerCAmelCase__ , state.pop("""type""" ) )
_UpperCAmelCase : Optional[Any] = component_class(**lowerCAmelCase__ )
setattr(self.backend_tokenizer , lowerCAmelCase__ , lowerCAmelCase__ )
    @property
    def mask_token (self ):
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""" )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token (self , value ):
        # the mask token behaves like a normal word, i.e. it includes the space before it
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
def snake_case_ (self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
_UpperCAmelCase : Optional[Any] = kwargs.get("""is_split_into_words""" , lowerCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case_ (self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
_UpperCAmelCase : Optional[int] = kwargs.get("""is_split_into_words""" , lowerCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCAmelCase__ , **lowerCAmelCase__ )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ = None ):
_UpperCAmelCase : Union[str, Any] = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
    def snake_case_ (self , token_ids_0 , token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def snake_case_ (self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
| 170 | 0 |
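# A minimal usage sketch for the fast tokenizer subclass above, assuming the
# standard "roberta-base" checkpoint is available; the sample sentence is
# arbitrary and chosen only for illustration.
from transformers import RobertaTokenizerFast

roberta_tok = RobertaTokenizerFast.from_pretrained("roberta-base")
enc = roberta_tok("Hello world!")
# single sequences are wrapped as <s> ... </s>, matching build_inputs_with_special_tokens above
assert enc["input_ids"][0] == roberta_tok.bos_token_id
assert enc["input_ids"][-1] == roberta_tok.eos_token_id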
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase_ : List[Any] = {
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowercase_ : Dict = """switch_transformers"""
lowercase_ : str = ["""past_key_values"""]
lowercase_ : Dict = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , snake_case_=3_2_1_2_8 , snake_case_=7_6_8 , snake_case_=6_4 , snake_case_=2_0_4_8 , snake_case_=6_4 , snake_case_=1_2 , snake_case_=3 , snake_case_=1_2 , snake_case_=3 , snake_case_=1_2 , snake_case_=8 , snake_case_=False , snake_case_=0.01 , snake_case_="float32" , snake_case_=False , snake_case_=3_2 , snake_case_=1_2_8 , snake_case_=0.1 , snake_case_=1E-6 , snake_case_=0.0_01 , snake_case_=0.0_01 , snake_case_=1.0 , snake_case_="relu" , snake_case_=True , snake_case_=False , snake_case_=True , snake_case_=0 , snake_case_=1 , **snake_case_ , ):
"""simple docstring"""
A_ : str = vocab_size
A_ : Tuple = d_model
A_ : Tuple = d_kv
A_ : str = d_ff
A_ : Tuple = num_sparse_encoder_layers
A_ : int = num_layers
A_ : List[Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
A_ : List[str] = num_sparse_decoder_layers
        # This tells us how many encoder layers sit between consecutive sparse layers.
if self.num_sparse_encoder_layers > 0:
A_ : List[str] = self.num_layers // self.num_sparse_encoder_layers
else:
A_ : str = self.num_layers # HACK: this will create 0 sparse layers
        # This tells us how many decoder layers sit between consecutive sparse layers.
if self.num_sparse_decoder_layers > 0:
A_ : int = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
A_ : str = self.num_decoder_layers # HACK: this will create 0 sparse layers
A_ : Optional[Any] = num_heads
A_ : Tuple = num_experts
A_ : List[str] = expert_capacity
A_ : List[Any] = router_bias
A_ : Optional[int] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
A_ : Dict = router_dtype
A_ : int = router_ignore_padding_tokens
A_ : Union[str, Any] = relative_attention_num_buckets
A_ : List[str] = relative_attention_max_distance
A_ : str = dropout_rate
A_ : Tuple = layer_norm_epsilon
A_ : Optional[Any] = initializer_factor
A_ : str = feed_forward_proj
A_ : List[Any] = use_cache
A_ : Optional[Any] = add_router_probs
A_ : Optional[Any] = router_z_loss_coef
A_ : Union[str, Any] = router_aux_loss_coef
A_ : str = self.feed_forward_proj.split('-' )
A_ : List[Any] = act_info[-1]
A_ : Tuple = act_info[0] == 'gated'
if len(snake_case_ ) > 1 and act_info[0] != "gated" or len(snake_case_ ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
A_ : List[Any] = 'gelu_new'
super().__init__(
pad_token_id=snake_case_ , eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , **snake_case_ , ) | 286 |
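# The sparse-layer spacing computed in __init__ above, checked against the upstream
# SwitchTransformersConfig (same logic as the class defined here); the chosen
# hyperparameter values are arbitrary.
from transformers import SwitchTransformersConfig

switch_cfg = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
# a sparse MoE layer is placed every num_layers // num_sparse_encoder_layers layers
assert switch_cfg.encoder_sparse_step == 4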
"""simple docstring"""
from copy import deepcopy
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , snake_case_ = None , snake_case_ = None ):
"""simple docstring"""
if arr is None and size is not None:
A_ : Union[str, Any] = size
A_ : List[str] = [0] * size
elif arr is not None:
self.init(snake_case_ )
else:
raise ValueError('Either arr or size must be specified' )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : Union[str, Any] = len(snake_case_ )
A_ : Optional[int] = deepcopy(snake_case_ )
for i in range(1 , self.size ):
A_ : Optional[Any] = self.next_(snake_case_ )
if j < self.size:
self.tree[j] += self.tree[i]
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : int = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
A_ : Optional[int] = self.next_(snake_case_ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def lowerCamelCase_ ( snake_case_ ):
"""simple docstring"""
return index + (index & (-index))
@staticmethod
def lowerCamelCase_ ( snake_case_ ):
"""simple docstring"""
return index - (index & (-index))
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
A_ : List[str] = self.next_(snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
self.add(snake_case_ , value - self.get(snake_case_ ) )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
if right == 0:
return 0
A_ : Any = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
A_ : Tuple = self.prev(snake_case_ )
return result
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
return self.prefix(snake_case_ ) - self.prefix(snake_case_ )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
return self.query(snake_case_ , index + 1 )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
value -= self.tree[0]
if value < 0:
return -1
A_ : List[Any] = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
A_ : Tuple = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod() | 286 | 1 |
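# A short demonstration of the Fenwick tree above: point updates plus prefix and
# range queries, each in O(log n). The sample values are arbitrary.
bit = BinaryIndexedTree([1, 2, 3, 4, 5])
assert bit.prefix(3) == 1 + 2 + 3        # sum of arr[0:3]
assert bit.query(1, 4) == 2 + 3 + 4      # sum of arr[1:4]
bit.add(2, 10)                           # arr[2] += 10
assert bit.query(1, 4) == 2 + 13 + 4
assert bit.get_array() == [1, 2, 13, 4, 5]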
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def _lowercase ( ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [randint(-1000 , 1000 ) for i in range(10 )]
SCREAMING_SNAKE_CASE__ : Any = randint(-5000 , 5000 )
return (arr, r)
a :Union[str, Any] = make_dataset()
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Any:
for triplet in permutations(__lowerCamelCase , 3 ):
if sum(__lowerCamelCase ) == target:
return tuple(sorted(__lowerCamelCase ) )
return (0, 0, 0)
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
arr.sort()
SCREAMING_SNAKE_CASE__ : Dict = len(__lowerCamelCase )
for i in range(n - 1 ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def _lowercase ( ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
SCREAMING_SNAKE_CASE__ : Any = "\ntriplet_sum1(*dataset)\n"
SCREAMING_SNAKE_CASE__ : Optional[int] = "\ntriplet_sum2(*dataset)\n"
SCREAMING_SNAKE_CASE__ : Optional[Any] = repeat(setup=__lowerCamelCase , stmt=__lowerCamelCase , repeat=5 , number=1_0000 )
SCREAMING_SNAKE_CASE__ : Tuple = repeat(setup=__lowerCamelCase , stmt=__lowerCamelCase , repeat=5 , number=1_0000 )
return (min(__lowerCamelCase ), min(__lowerCamelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
a :List[str] = solution_times()
print(f'The time for naive implementation is {times[0]}.')
print(f'The time for optimized implementation is {times[1]}.')
| 360 |
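# Quick sanity check of the two strategies above on a fixed input; both must
# find the same sorted triplet (the numbers are arbitrary).
assert triplet_sum1([13, 29, 7, 23, 5], 35) == (5, 7, 23)
assert triplet_sum2([13, 29, 7, 23, 5], 35) == (5, 7, 23)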
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __a (metaclass=UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = ["""transformers""", """torch""", """note_seq"""]
def __init__( self , *_a , **_a ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def _a ( cls , *_a , **_a ) -> int:
"""simple docstring"""
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def _a ( cls , *_a , **_a ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
| 56 | 0 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
class a__( __snake_case ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
super().__init__()
self.register_modules(
vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , )
def a_ ( self , __lowerCAmelCase = "auto"):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCAmelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase__)
def a_ ( self):
"""simple docstring"""
self.enable_attention_slicing(UpperCamelCase__)
@torch.no_grad()
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = 512 , __lowerCAmelCase = 512 , __lowerCAmelCase = 50 , __lowerCAmelCase = 7.5 , __lowerCAmelCase = None , __lowerCAmelCase = 1 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = "pil" , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = 1 , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
if isinstance(UpperCamelCase__ , UpperCamelCase__):
lowerCAmelCase = 1
elif isinstance(UpperCamelCase__ , UpperCamelCase__):
lowerCAmelCase = len(UpperCamelCase__)
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase__)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase__ , UpperCamelCase__) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(UpperCamelCase__)}.")
# get prompt text embeddings
lowerCAmelCase = self.tokenizer(
UpperCamelCase__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
lowerCAmelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCAmelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f" {self.tokenizer.model_max_length} tokens: {removed_text}")
lowerCAmelCase = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowerCAmelCase = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase = text_embeddings.shape
lowerCAmelCase = text_embeddings.repeat(1 , UpperCamelCase__ , 1)
lowerCAmelCase = text_embeddings.view(bs_embed * num_images_per_prompt , UpperCamelCase__ , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCAmelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCAmelCase = 42
if negative_prompt is None:
lowerCAmelCase = [""]
elif type(UpperCamelCase__) is not type(UpperCamelCase__):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(UpperCamelCase__)} !="
f" {type(UpperCamelCase__)}.")
elif isinstance(UpperCamelCase__ , UpperCamelCase__):
lowerCAmelCase = [negative_prompt]
elif batch_size != len(UpperCamelCase__):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(UpperCamelCase__)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
""" the batch size of `prompt`.""")
else:
lowerCAmelCase = negative_prompt
lowerCAmelCase = text_input_ids.shape[-1]
lowerCAmelCase = self.tokenizer(
UpperCamelCase__ , padding="""max_length""" , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="""pt""" , )
lowerCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase = uncond_embeddings.shape[1]
lowerCAmelCase = uncond_embeddings.repeat(UpperCamelCase__ , UpperCamelCase__ , 1)
lowerCAmelCase = uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCamelCase__ , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCAmelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCAmelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowerCAmelCase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCAmelCase = torch.randn(
UpperCamelCase__ , generator=UpperCamelCase__ , device="""cpu""" , dtype=UpperCamelCase__).to(self.device)
lowerCAmelCase = torch.randn(UpperCamelCase__ , generator=UpperCamelCase__ , device="""cpu""" , dtype=UpperCamelCase__).to(
self.device)
else:
lowerCAmelCase = torch.randn(
UpperCamelCase__ , generator=UpperCamelCase__ , device=self.device , dtype=UpperCamelCase__)
lowerCAmelCase = torch.randn(UpperCamelCase__ , generator=UpperCamelCase__ , device=self.device , dtype=UpperCamelCase__)
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
lowerCAmelCase = latents_reference.to(self.device)
lowerCAmelCase = latents.to(self.device)
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowerCAmelCase = (latents_shape[3] - latents_shape_reference[3]) // 2
lowerCAmelCase = (latents_shape[2] - latents_shape_reference[2]) // 2
lowerCAmelCase = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowerCAmelCase = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowerCAmelCase = 0 if dx < 0 else dx
lowerCAmelCase = 0 if dy < 0 else dy
lowerCAmelCase = max(-dx , 0)
lowerCAmelCase = max(-dy , 0)
lowerCAmelCase = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(UpperCamelCase__)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCAmelCase = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
lowerCAmelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCAmelCase = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
lowerCAmelCase = {}
if accepts_eta:
lowerCAmelCase = eta
for i, t in enumerate(self.progress_bar(UpperCamelCase__)):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
lowerCAmelCase = self.scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__)
# predict the noise residual
lowerCAmelCase = self.unet(UpperCamelCase__ , UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__).sample
# perform guidance
if do_classifier_free_guidance:
lowerCAmelCase = noise_pred.chunk(2)
lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
lowerCAmelCase = 1 / 0.18215 * latents
lowerCAmelCase = self.vae.decode(UpperCamelCase__).sample
lowerCAmelCase = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if self.safety_checker is not None:
lowerCAmelCase = self.feature_extractor(self.numpy_to_pil(UpperCamelCase__) , return_tensors="""pt""").to(
self.device)
lowerCAmelCase = self.safety_checker(
images=UpperCamelCase__ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype))
else:
lowerCAmelCase = None
if output_type == "pil":
lowerCAmelCase = self.numpy_to_pil(UpperCamelCase__)
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=UpperCamelCase__ , nsfw_content_detected=UpperCamelCase__)
| 272 |
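# A hedged usage sketch for the pipeline class above. It matches the "seed resize"
# community pipeline upstream; the checkpoint id and the custom_pipeline name are
# assumptions, and running this downloads the Stable Diffusion weights.
import torch
from diffusers import DiffusionPipeline

sd_pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="seed_resize_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")
gen = torch.Generator("cuda").manual_seed(0)
# the same seed at a different size should keep the overall composition similar
image = sd_pipe("a photograph of an astronaut", height=512, width=768, generator=gen).images[0]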
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class snake_case ( __snake_case ):
    # to overwrite at feature extractor specific tests
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Tuple = None
@property
def lowercase_ ( self : str)-> Any:
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
def lowercase_ ( self : Tuple)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(UpperCamelCase__ , "feature_size"))
self.assertTrue(hasattr(UpperCamelCase__ , "sampling_rate"))
self.assertTrue(hasattr(UpperCamelCase__ , "padding_value"))
def lowercase_ ( self : Dict)-> Any:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: str = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: int = feat_extract.model_input_names[0]
__lowerCAmelCase: List[Any] = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(UpperCamelCase__) == len(UpperCamelCase__) for x, y in zip(UpperCamelCase__ , processed_features[input_name])))
__lowerCAmelCase: Tuple = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase__)
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type="np")
__lowerCAmelCase: Optional[int] = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__lowerCAmelCase: Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def lowercase_ ( self : Optional[int])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase__)
__lowerCAmelCase: int = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: Any = BatchFeature({input_name: speech_inputs} , tensor_type="pt")
__lowerCAmelCase: str = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__lowerCAmelCase: Any = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def lowercase_ ( self : str)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase__)
__lowerCAmelCase: Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type="tf")
__lowerCAmelCase: int = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__lowerCAmelCase: Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : List[str]=False)-> Optional[int]:
'''simple docstring'''
def _inputs_have_equal_length(UpperCamelCase__ : Tuple):
__lowerCAmelCase: Optional[int] = len(input[0])
for input_slice in input[1:]:
if len(UpperCamelCase__) != length:
return False
return True
def _inputs_are_equal(UpperCamelCase__ : str , UpperCamelCase__ : str):
if len(UpperCamelCase__) != len(UpperCamelCase__):
return False
for input_slice_a, input_slice_a in zip(UpperCamelCase__ , UpperCamelCase__):
if not np.allclose(np.asarray(UpperCamelCase__) , np.asarray(UpperCamelCase__) , atol=1e-3):
return False
return True
__lowerCAmelCase: List[str] = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCamelCase__)
__lowerCAmelCase: Tuple = feat_extract.model_input_names[0]
__lowerCAmelCase: List[Any] = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase: Optional[int] = self.feat_extract_tester.seq_length_diff
__lowerCAmelCase: str = self.feat_extract_tester.max_seq_length + pad_diff
__lowerCAmelCase: Tuple = self.feat_extract_tester.min_seq_length
__lowerCAmelCase: Union[str, Any] = self.feat_extract_tester.batch_size
__lowerCAmelCase: Tuple = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__lowerCAmelCase: Optional[int] = feat_extract.pad(UpperCamelCase__ , padding=UpperCamelCase__)
__lowerCAmelCase: Optional[int] = input_a[input_name]
__lowerCAmelCase: List[Any] = feat_extract.pad(UpperCamelCase__ , padding="longest")
__lowerCAmelCase: int = input_a[input_name]
__lowerCAmelCase: Tuple = feat_extract.pad(UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[-1]))
__lowerCAmelCase: Optional[Any] = input_a[input_name]
__lowerCAmelCase: Union[str, Any] = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="np")
__lowerCAmelCase: Optional[int] = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(UpperCamelCase__):
feat_extract.pad(UpperCamelCase__ , padding="max_length")[input_name]
__lowerCAmelCase: Any = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=UpperCamelCase__ , return_tensors="np")
__lowerCAmelCase: Optional[Any] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(_inputs_are_equal(UpperCamelCase__ , UpperCamelCase__))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
__lowerCAmelCase: Dict = feat_extract.pad(UpperCamelCase__ , pad_to_multiple_of=1_0)
__lowerCAmelCase: Tuple = input_a[input_name]
__lowerCAmelCase: Tuple = feat_extract.pad(UpperCamelCase__ , padding="longest" , pad_to_multiple_of=1_0)
__lowerCAmelCase: List[str] = input_a[input_name]
__lowerCAmelCase: str = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , pad_to_multiple_of=1_0 , max_length=UpperCamelCase__)
__lowerCAmelCase: str = input_a[input_name]
__lowerCAmelCase: Dict = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , pad_to_multiple_of=1_0 , max_length=UpperCamelCase__ , return_tensors="np" , )
__lowerCAmelCase: List[str] = input_a[input_name]
self.assertTrue(all(len(UpperCamelCase__) % 1_0 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(UpperCamelCase__ , UpperCamelCase__))
__lowerCAmelCase: Optional[Any] = pad_max_length if pad_max_length % 1_0 == 0 else (pad_max_length // 1_0 + 1) * 1_0
self.assertTrue(all(len(UpperCamelCase__) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
__lowerCAmelCase: Any = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1e-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1e-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1e-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1e-3)
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : Optional[int]=False)-> str:
'''simple docstring'''
def _inputs_have_equal_length(UpperCamelCase__ : List[Any]):
__lowerCAmelCase: List[str] = len(input[0])
for input_slice in input[1:]:
if len(UpperCamelCase__) != length:
return False
return True
def _inputs_are_equal(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any]):
if len(UpperCamelCase__) != len(UpperCamelCase__):
return False
for input_slice_a, input_slice_a in zip(UpperCamelCase__ , UpperCamelCase__):
if not np.allclose(np.asarray(UpperCamelCase__) , np.asarray(UpperCamelCase__) , atol=1e-3):
return False
return True
__lowerCAmelCase: Dict = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Any = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCamelCase__)
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs})
# truncate to smallest
__lowerCAmelCase: int = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]) , truncation=UpperCamelCase__)
__lowerCAmelCase: Any = input_a[input_name]
__lowerCAmelCase: str = feat_extract.pad(UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]))
__lowerCAmelCase: Tuple = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertFalse(_inputs_have_equal_length(UpperCamelCase__))
# truncate to smallest with np
__lowerCAmelCase: Optional[int] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]) , return_tensors="np" , truncation=UpperCamelCase__ , )
__lowerCAmelCase: List[str] = input_a[input_name]
__lowerCAmelCase: List[str] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]) , return_tensors="np")
__lowerCAmelCase: Any = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(UpperCamelCase__))
# truncate to middle
__lowerCAmelCase: Union[str, Any] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[1]) , truncation=UpperCamelCase__ , return_tensors="np" , )
__lowerCAmelCase: int = input_a[input_name]
__lowerCAmelCase: List[Any] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[1]) , truncation=UpperCamelCase__)
__lowerCAmelCase: Dict = input_a[input_name]
__lowerCAmelCase: List[Any] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[1]) , return_tensors="np")
__lowerCAmelCase: str = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(_inputs_are_equal(UpperCamelCase__ , UpperCamelCase__))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCamelCase__):
feat_extract.pad(UpperCamelCase__ , truncation=UpperCamelCase__)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCamelCase__):
feat_extract.pad(UpperCamelCase__ , padding="longest" , truncation=UpperCamelCase__)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCamelCase__):
feat_extract.pad(UpperCamelCase__ , padding="longest" , truncation=UpperCamelCase__)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(UpperCamelCase__):
feat_extract.pad(UpperCamelCase__ , padding="max_length" , truncation=UpperCamelCase__)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
__lowerCAmelCase: Tuple = 1_2
__lowerCAmelCase: Optional[int] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]) , pad_to_multiple_of=UpperCamelCase__ , truncation=UpperCamelCase__ , )
__lowerCAmelCase: Dict = input_a[input_name]
__lowerCAmelCase: List[str] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]) , pad_to_multiple_of=UpperCamelCase__ , )
__lowerCAmelCase: str = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
__lowerCAmelCase: Optional[Any] = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
__lowerCAmelCase: Optional[Any] = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertFalse(_inputs_have_equal_length(UpperCamelCase__))
def lowercase_ ( self : Union[str, Any])-> Optional[Any]:
'''simple docstring'''
self._check_padding(numpify=UpperCamelCase__)
def lowercase_ ( self : Optional[Any])-> List[str]:
'''simple docstring'''
self._check_padding(numpify=UpperCamelCase__)
def lowercase_ ( self : List[Any])-> Tuple:
'''simple docstring'''
self._check_truncation(numpify=UpperCamelCase__)
def lowercase_ ( self : Optional[Any])-> Optional[Any]:
'''simple docstring'''
self._check_truncation(numpify=UpperCamelCase__)
@require_torch
def lowercase_ ( self : Dict)-> Dict:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: str = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase: str = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="np")[input_name]
__lowerCAmelCase: Dict = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="pt")[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1e-2)
@require_tf
def lowercase_ ( self : Tuple)-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Dict = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Tuple = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: List[Any] = feat_extract.model_input_names[0]
__lowerCAmelCase: Any = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase: Dict = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="np")[input_name]
__lowerCAmelCase: Any = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="tf")[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1e-2)
def lowercase_ ( self : Optional[int])-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Any = self.feat_extract_dict
__lowerCAmelCase: Any = True
__lowerCAmelCase: str = self.feature_extraction_class(**UpperCamelCase__)
__lowerCAmelCase: List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: Tuple = [len(UpperCamelCase__) for x in speech_inputs]
__lowerCAmelCase: Optional[Any] = feat_extract.model_input_names[0]
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase: Optional[Any] = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="np")
self.assertIn("attention_mask" , UpperCamelCase__)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , UpperCamelCase__)
def lowercase_ ( self : List[str])-> Any:
'''simple docstring'''
__lowerCAmelCase: Any = self.feat_extract_dict
__lowerCAmelCase: str = True
__lowerCAmelCase: int = self.feature_extraction_class(**UpperCamelCase__)
__lowerCAmelCase: int = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: str = [len(UpperCamelCase__) for x in speech_inputs]
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase: List[str] = min(UpperCamelCase__)
__lowerCAmelCase: List[str] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="np")
self.assertIn("attention_mask" , UpperCamelCase__)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
| 217 | 0 |
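# A concrete instance of the padding behaviour the mixin above tests; using
# Wav2Vec2FeatureExtractor is an assumption (any extractor created with
# return_attention_mask=True behaves the same way).
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

feat = Wav2Vec2FeatureExtractor(
    feature_size=1, sampling_rate=16000, padding_value=0.0, return_attention_mask=True
)
ragged = [np.ones(800, dtype=np.float32), np.ones(600, dtype=np.float32)]
padded = feat.pad({"input_values": ragged}, padding="longest", return_tensors="np")
assert padded["input_values"].shape == (2, 800)
# the attention-mask row sums recover the original lengths, as asserted above
assert padded["attention_mask"].sum(-1).tolist() == [800, 600]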
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix: str = "") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : int )-> int:
snake_case = torch.rand(12 , dtype=torch.floataa ) - 0.5
snake_case = AgentAudio(__snake_case )
snake_case = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(__snake_case , agent_type.to_raw() , atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(__snake_case ) )
# Ensure that the file contains the same value as the original tensor
snake_case , snake_case = sf.read(__snake_case )
self.assertTrue(torch.allclose(__snake_case , torch.tensor(__snake_case ) , atol=1e-4 ) )
def lowerCAmelCase ( self : Tuple )-> Optional[int]:
snake_case = torch.rand(12 , dtype=torch.floataa ) - 0.5
snake_case = get_new_path(suffix=""".wav""" )
sf.write(__snake_case , __snake_case , 1_60_00 )
snake_case = AgentAudio(__snake_case )
self.assertTrue(torch.allclose(__snake_case , agent_type.to_raw() , atol=1e-4 ) )
self.assertEqual(agent_type.to_string() , __snake_case )
@require_vision
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : int )-> Dict:
snake_case = torch.randint(0 , 2_56 , (64, 64, 3) )
snake_case = AgentImage(__snake_case )
snake_case = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(__snake_case , agent_type._tensor , atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__snake_case ) )
def lowerCAmelCase ( self : Dict )-> List[str]:
snake_case = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
snake_case = Image.open(__snake_case )
snake_case = AgentImage(__snake_case )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__snake_case ) )
def lowerCAmelCase ( self : Optional[Any] )-> Any:
snake_case = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
snake_case = Image.open(__snake_case )
snake_case = AgentImage(__snake_case )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__snake_case ) )
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Any )-> Any:
snake_case = """Hey!"""
snake_case = AgentText(__snake_case )
self.assertEqual(__snake_case , agent_type.to_string() )
self.assertEqual(__snake_case , agent_type.to_raw() )
self.assertEqual(__snake_case , __snake_case )
| 359 |
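# The round trip the audio tests above rely on, spelled out as a sketch (assumes
# torch and soundfile are installed): AgentAudio serialises its tensor to a .wav
# file on to_string() and reads it back on to_raw().
import torch
from transformers.tools.agent_types import AgentAudio

wave = torch.rand(12, dtype=torch.float32) - 0.5
agent_audio = AgentAudio(wave, samplerate=16000)
wav_path = agent_audio.to_string()  # writes the .wav and returns its path
assert torch.allclose(wave, agent_audio.to_raw(), atol=1e-4)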
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece.model")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
_SCREAMING_SNAKE_CASE = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = CamembertTokenizer
snake_case_ = CamembertTokenizerFast
snake_case_ = True
snake_case_ = True
def lowerCAmelCase ( self : Union[str, Any] )-> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case = CamembertTokenizer(__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Tuple )-> List[Any]:
snake_case = """<pad>"""
snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def lowerCAmelCase ( self : Dict )-> Optional[Any]:
snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__snake_case ) , 10_04 )
def lowerCAmelCase ( self : List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 10_05 )
def lowerCAmelCase ( self : List[str] )-> List[str]:
snake_case = CamembertTokenizer(__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
snake_case = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
snake_case = """I was born in 92000, and this is falsé."""
snake_case = tokenizer.encode(__snake_case )
snake_case = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
snake_case = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
snake_case = tokenizer.convert_ids_to_tokens(__snake_case )
snake_case = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def lowerCAmelCase ( self : str )-> Any:
if not self.test_rust_tokenizer:
return
snake_case = self.get_tokenizer()
snake_case = self.get_rust_tokenizer()
snake_case = """I was born in 92000, and this is falsé."""
snake_case = tokenizer.tokenize(__snake_case )
snake_case = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
snake_case = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = self.get_rust_tokenizer()
snake_case = tokenizer.encode(__snake_case )
snake_case = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@slow
def lowerCAmelCase ( self : Any )-> Optional[int]:
# fmt: off
snake_case = {"""input_ids""": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
snake_case = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=__snake_case , )
| 3 | 0 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    """A pile of cards; piles compare by their top (last) element."""
    def __lt__(self, other):
        return self[-1] < other[-1]
    def __eq__(self, other):
        return self[-1] == other[-1]
def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)
    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(patience_sort(unsorted))
| 234 |
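# Example runs of patience_sort above; it sorts in place and returns the list.
assert patience_sort([1, 9, 5, 21, 17, 6]) == [1, 5, 6, 9, 17, 21]
assert patience_sort([]) == []
assert patience_sort([-3, -17, -48]) == [-48, -17, -3]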
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(UpperCAmelCase__ ) , "Tatoeba directory does not exist." )
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = tempfile.mkdtemp()
return TatoebaConverter(save_dir=lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]:
'''simple docstring'''
self.resolver.convert_models(["heb-eng"] )
@slow
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Tuple = self.resolver.write_model_card("opus-mt-he-en" , dry_run=lowerCamelCase__ )
assert mmeta["long_pair"] == "heb-eng"
| 234 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
__UpperCAmelCase : Any = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[Any] = "deit"
def __init__( self , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=224 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=16 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = hidden_size
UpperCamelCase : Optional[Any] = num_hidden_layers
UpperCamelCase : List[Any] = num_attention_heads
UpperCamelCase : Union[str, Any] = intermediate_size
UpperCamelCase : Union[str, Any] = hidden_act
UpperCamelCase : Optional[Any] = hidden_dropout_prob
UpperCamelCase : Tuple = attention_probs_dropout_prob
UpperCamelCase : Optional[int] = initializer_range
UpperCamelCase : Any = layer_norm_eps
UpperCamelCase : List[Any] = image_size
UpperCamelCase : Dict = patch_size
UpperCamelCase : Dict = num_channels
UpperCamelCase : Any = qkv_bias
UpperCamelCase : Tuple = encoder_stride
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = version.parse("1.11")
@property
def _lowercase ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _lowercase ( self ):
"""simple docstring"""
return 1e-4
| 315 |
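# What the ONNX config above exposes, sketched with the upstream class names
# (DeiTConfig / DeiTOnnxConfig) since the two classes above share one obfuscated
# name; the import paths are the upstream ones.
from transformers import DeiTConfig
from transformers.models.deit.configuration_deit import DeiTOnnxConfig

deit_cfg = DeiTConfig()
onnx_cfg = DeiTOnnxConfig(deit_cfg)
assert list(onnx_cfg.inputs.keys()) == ["pixel_values"]
assert onnx_cfg.atol_for_validation == 1e-4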
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
'''simple docstring'''
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
warnings.warn(
'''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ImageGPTImageProcessor instead.''' , __SCREAMING_SNAKE_CASE , )
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 315 | 1 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ ( _a ):
_a = 42
@flax_register_to_config
class SCREAMING_SNAKE_CASE__ ( nn.Module , _a , _a ):
_a = 32
_a = 4
_a = 4
_a = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_a = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
_a = False
_a = (3_20, 6_40, 12_80, 12_80)
_a = 2
_a = 8
_a = None
_a = 12_80
_a = 0.0
_a = False
_a = jnp.floataa
_a = True
_a = 0
_a = False
def __lowercase ( self : Dict , lowerCAmelCase : jax.random.KeyArray ):
# init input tensors
lowerCAmelCase = (1, self.in_channels, self.sample_size, self.sample_size)
lowerCAmelCase = jnp.zeros(lowerCAmelCase , dtype=jnp.floataa )
lowerCAmelCase = jnp.ones((1,) , dtype=jnp.intaa )
lowerCAmelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowerCAmelCase , lowerCAmelCase = jax.random.split(lowerCAmelCase )
lowerCAmelCase = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )["params"]
def __lowercase ( self : str ):
lowerCAmelCase = self.block_out_channels
lowerCAmelCase = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"""At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCAmelCase = self.num_attention_heads or self.attention_head_dim
# input
lowerCAmelCase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCAmelCase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCAmelCase = FlaxTimestepEmbedding(lowerCAmelCase , dtype=self.dtype )
lowerCAmelCase = self.only_cross_attention
if isinstance(lowerCAmelCase , lowerCAmelCase ):
lowerCAmelCase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowerCAmelCase , lowerCAmelCase ):
lowerCAmelCase = (num_attention_heads,) * len(self.down_block_types )
# down
lowerCAmelCase = []
lowerCAmelCase = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
lowerCAmelCase = output_channel
lowerCAmelCase = block_out_channels[i]
lowerCAmelCase = i == len(lowerCAmelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCAmelCase = FlaxCrossAttnDownBlockaD(
in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCAmelCase = FlaxDownBlockaD(
in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowerCAmelCase )
lowerCAmelCase = down_blocks
# mid
lowerCAmelCase = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
lowerCAmelCase = []
lowerCAmelCase = list(reversed(lowerCAmelCase ) )
lowerCAmelCase = list(reversed(lowerCAmelCase ) )
lowerCAmelCase = list(reversed(lowerCAmelCase ) )
lowerCAmelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
lowerCAmelCase = output_channel
lowerCAmelCase = reversed_block_out_channels[i]
lowerCAmelCase = reversed_block_out_channels[min(i + 1 , len(lowerCAmelCase ) - 1 )]
lowerCAmelCase = i == len(lowerCAmelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
lowerCAmelCase = FlaxCrossAttnUpBlockaD(
in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , prev_output_channel=lowerCAmelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCAmelCase = FlaxUpBlockaD(
in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , prev_output_channel=lowerCAmelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(lowerCAmelCase )
lowerCAmelCase = output_channel
lowerCAmelCase = up_blocks
# out
lowerCAmelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
lowerCAmelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : bool = True , lowerCAmelCase : bool = False , ):
# 1. time
if not isinstance(lowerCAmelCase , jnp.ndarray ):
lowerCAmelCase = jnp.array([timesteps] , dtype=jnp.int32 )
elif isinstance(lowerCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCAmelCase = timesteps.astype(dtype=jnp.float32 )
lowerCAmelCase = jnp.expand_dims(lowerCAmelCase , 0 )
lowerCAmelCase = self.time_proj(lowerCAmelCase )
lowerCAmelCase = self.time_embedding(lowerCAmelCase )
# 2. pre-process
lowerCAmelCase = jnp.transpose(lowerCAmelCase , (0, 2, 3, 1) )
lowerCAmelCase = self.conv_in(lowerCAmelCase )
# 3. down
lowerCAmelCase = (sample,)
for down_block in self.down_blocks:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
lowerCAmelCase , lowerCAmelCase = down_block(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , deterministic=not train )
else:
lowerCAmelCase , lowerCAmelCase = down_block(lowerCAmelCase , lowerCAmelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
lowerCAmelCase = ()
for down_block_res_sample, down_block_additional_residual in zip(
lowerCAmelCase , lowerCAmelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
lowerCAmelCase = new_down_block_res_samples
# 4. mid
lowerCAmelCase = self.mid_block(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
lowerCAmelCase = down_block_res_samples[-(self.layers_per_block + 1) :]
lowerCAmelCase = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(lowerCAmelCase , lowerCAmelCase ):
lowerCAmelCase = up_block(
lowerCAmelCase , temb=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , res_hidden_states_tuple=lowerCAmelCase , deterministic=not train , )
else:
lowerCAmelCase = up_block(lowerCAmelCase , temb=lowerCAmelCase , res_hidden_states_tuple=lowerCAmelCase , deterministic=not train )
# 6. post-process
lowerCAmelCase = self.conv_norm_out(lowerCAmelCase )
lowerCAmelCase = nn.silu(lowerCAmelCase )
lowerCAmelCase = self.conv_out(lowerCAmelCase )
lowerCAmelCase = jnp.transpose(lowerCAmelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=lowerCAmelCase )
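# A minimal sketch (not part of the model) of why `__call__` transposes twice:
# Flax's nn.Conv expects channels-last (NHWC) inputs, while the public diffusers
# API passes channels-first (NCHW) samples. The shapes below are assumed examples.
import jax.numpy as jnp

sample_nchw = jnp.zeros((1, 4, 64, 64))                 # (batch, channels, height, width)
sample_nhwc = jnp.transpose(sample_nchw, (0, 2, 3, 1))  # -> (1, 64, 64, 4) for nn.Conv
restored = jnp.transpose(sample_nhwc, (0, 3, 1, 2))     # back to (1, 4, 64, 64) for the caller
assert restored.shape == sample_nchw.shape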
| 155 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
def __lowercase ( self : Tuple ):
super().setUp()
# fmt: off
lowerCAmelCase = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
lowerCAmelCase = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
lowerCAmelCase = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
lowerCAmelCase = {"""unk_token""": """<unk>"""}
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase ) )
def __lowercase ( self : Optional[Any] , **lowerCAmelCase : str ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def __lowercase ( self : Any , **lowerCAmelCase : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def __lowercase ( self : Optional[Any] , lowerCAmelCase : Dict ):
lowerCAmelCase = """lower newer"""
lowerCAmelCase = """lower newer"""
return input_text, output_text
def __lowercase ( self : int ):
lowerCAmelCase = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase = """lower newer"""
lowerCAmelCase = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
lowerCAmelCase = tokens + [tokenizer.unk_token]
lowerCAmelCase = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , lowerCAmelCase )
@require_ftfy
def __lowercase ( self : Union[str, Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
lowerCAmelCase = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
lowerCAmelCase = tokenizer_s.tokenize(lowerCAmelCase )
lowerCAmelCase = tokenizer_r.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
lowerCAmelCase = """xa\u0303y""" + """ """ + """x\xe3y"""
lowerCAmelCase = tokenizer_s.tokenize(lowerCAmelCase )
lowerCAmelCase = tokenizer_r.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
# Test that the tokenization is identical on unicode of space type
lowerCAmelCase = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
lowerCAmelCase = tokenizer_s.tokenize(lowerCAmelCase )
lowerCAmelCase = tokenizer_r.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
# Test that the tokenization is identical on unicode of line break type
lowerCAmelCase = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
lowerCAmelCase = tokenizer_s.tokenize(lowerCAmelCase )
lowerCAmelCase = tokenizer_r.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def __lowercase ( self : Any ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
lowerCAmelCase = f'''{text_of_1_token} {text_of_1_token}'''
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase , use_fast=lowerCAmelCase , )
lowerCAmelCase = tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase ) + 1, len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
lowerCAmelCase = f''' {text}'''
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase , use_fast=lowerCAmelCase , )
lowerCAmelCase = tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase ) + 1, 1 + len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
def __lowercase ( self : Dict ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
with self.assertRaises(lowerCAmelCase ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def __lowercase ( self : Optional[int] ):
super().test_tokenization_python_rust_equals()
def __lowercase ( self : Optional[int] ):
# CLIP always lower cases letters
pass
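# A hedged sketch of the byte-pair merge step the toy vocabulary above exercises.
# This is a simplification: the real CLIP tokenizer also normalizes text and picks
# merges by rank over pairs, but for this tiny merge list the result is the same.
def bpe(word, merges):
    # Start from single characters, with the end-of-word marker on the last one.
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    for first, second in merges:  # merges applied in priority order
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == first and symbols[i + 1] == second:
                symbols[i : i + 2] = [first + second]
            else:
                i += 1
    return symbols


assert bpe("lower", [("l", "o"), ("lo", "w</w>"), ("e", "r</w>")]) == ["lo", "w", "er</w>"]
assert bpe("newer", [("l", "o"), ("lo", "w</w>"), ("e", "r</w>")]) == ["n", "e", "w", "er</w>"]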
| 155 | 1 |
"""simple docstring"""
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8), "Stack".center(print_width), "Postfix".center(print_width), sep=" | ")
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ")  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ")  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    reversed_infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ")"  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(reversed_infix)))[
        ::-1
    ]  # call infix_2_postfix on the reversed Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 352 |
"""simple docstring"""
from math import ceil
def assert_device_map(device_map: dict, num_blocks: int) -> None:
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)

    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks))
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks))
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks))


def get_device_map(n_layers: int, devices: list) -> dict:
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
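# Hedged usage sketch: splitting 8 layers across 2 devices. ceil(8 / 2) = 4 layers
# per device, so the blocks come out contiguous and the assertion helper passes.
example_map = get_device_map(n_layers=8, devices=[0, 1])
assert example_map == {0: [0, 1, 2, 3], 1: [4, 5, 6, 7]}
assert_device_map(example_map, num_blocks=8)  # no duplicates, nothing missing or extra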
| 108 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main() -> None:
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
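# Hedged usage sketch (shell commands shown as comments, since this module is the
# `transformers-cli` entry point): each subcommand above registers itself on the
# subparser and sets a `func` default that builds the command object `main` runs.
#   transformers-cli env
#   transformers-cli download bert-base-uncased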
| 45 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name)


IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models to PyTorch. In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand(BaseTransformersCLICommand):
    '''simple docstring'''

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert", help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.")
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder.")
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output.")
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name", type=str, default=None, help="Optional fine-tuning task name if the TF model was a finetuned model.")
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
__a = self._tf_checkpoint
__a = ''''''
else:
__a = self._tf_checkpoint
__a = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
_a , self._config , self._pytorch_dump_output , _a )
elif self._model_type == "gpt2":
try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
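# Hedged usage sketch (shell, as comments; all file paths are placeholders):
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint bert_model.ckpt \
#       --config bert_config.json \
#       --pytorch_dump_output pytorch_model.bin
# TensorFlow is imported lazily per model type, and IMPORT_ERROR_MESSAGE above is
# raised if it is missing.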
| 45 | 1 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_inverse_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 362 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
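# Hedged usage sketch (shell, as comments — requires a real SavedModel protobuf on disk):
#   python utils/check_tf_ops.py --saved_model_path path/to/saved_model.pb --opset 12 --strict
# With --strict the script raises on any op missing from the ONNX opset table;
# without it, the incompatible ops are only printed.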
| 301 | 0 |
"""simple docstring"""
import argparse
import os
import re
_lowercase : str = "src/diffusers"
# Pattern that looks at the indentation in a line.
_lowercase : List[Any] = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_lowercase : int = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_lowercase : Optional[int] = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
_lowercase : List[Any] = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_lowercase : str = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """simple docstring"""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def snake_case__ ( __lowerCamelCase : Any , __lowerCamelCase : List[Any]="" , __lowerCamelCase : int=None , __lowerCamelCase : Optional[int]=None ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =0
lowerCamelCase__ : Any =code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(__lowerCamelCase ):
index += 1
lowerCamelCase__ : Dict =['''\n'''.join(lines[:index] )]
else:
lowerCamelCase__ : Tuple =[]
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCamelCase__ : int =[lines[index]]
index += 1
while index < len(__lowerCamelCase ) and (end_prompt is None or not lines[index].startswith(__lowerCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__lowerCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(__lowerCamelCase ) )
if index < len(__lowerCamelCase ) - 1:
lowerCamelCase__ : str =[lines[index + 1]]
index += 1
else:
lowerCamelCase__ : str =[]
else:
blocks.append('''\n'''.join(__lowerCamelCase ) )
lowerCamelCase__ : str =[lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__lowerCamelCase ) > 0:
blocks.append('''\n'''.join(__lowerCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__lowerCamelCase ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    """simple docstring"""
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """simple docstring"""
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    """simple docstring"""
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        return _re_bracket_content.sub(_replace, import_statement)
def sort_imports(file, check_only=True):
    """simple docstring"""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:")

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
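# Hedged usage sketch of the three-bucket ordering sort_objects produces: constants
# (ALL_CAPS) first, then classes (Capitalized), then functions, with leading
# underscores ignored inside each bucket's alphabetical order.
assert sort_objects(["logging", "MyModel", "_helper", "CONFIG_NAME", "AutoTokenizer"]) == [
    "CONFIG_NAME", "AutoTokenizer", "MyModel", "_helper", "logging"
]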
| 238 |
"""simple docstring"""
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """simple docstring"""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """simple docstring"""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """simple docstring"""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """simple docstring"""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 238 | 1 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
UpperCAmelCase =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    '''simple docstring'''
def __init__( self ,**lowerCamelCase_ ) -> List[str]:
super().__init__(**lowerCamelCase_ )
if self.framework == "tf":
raise ValueError(f'The {self.__class__} is only available in PyTorch.' )
requires_backends(self ,"""vision""" )
self.check_model_type(lowerCamelCase_ )
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ = None ,**lowerCamelCase_ ,) -> int:
if "text_queries" in kwargs:
A = kwargs.pop("""text_queries""" )
if isinstance(lowerCamelCase_ ,(str, Image.Image) ):
A = {"""image""": image, """candidate_labels""": candidate_labels}
else:
A = image
A = super().__call__(lowerCamelCase_ ,**lowerCamelCase_ )
return results
def UpperCamelCase__ ( self ,**lowerCamelCase_ ) -> Any:
A = {}
if "threshold" in kwargs:
A = kwargs["""threshold"""]
if "top_k" in kwargs:
A = kwargs["""top_k"""]
return {}, {}, postprocess_params
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> int:
A = load_image(inputs["""image"""] )
A = inputs["""candidate_labels"""]
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
A = candidate_labels.split(""",""" )
A = torch.tensor([[image.height, image.width]] ,dtype=torch.intaa )
for i, candidate_label in enumerate(lowerCamelCase_ ):
A = self.tokenizer(lowerCamelCase_ ,return_tensors=self.framework )
A = self.image_processor(lowerCamelCase_ ,return_tensors=self.framework )
yield {
"is_last": i == len(lowerCamelCase_ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Tuple:
A = model_inputs.pop("""target_size""" )
A = model_inputs.pop("""candidate_label""" )
A = model_inputs.pop("""is_last""" )
A = self.model(**lowerCamelCase_ )
A = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
return model_outputs
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_=0.1 ,lowerCamelCase_=None ) -> str:
A = []
for model_output in model_outputs:
A = model_output["""candidate_label"""]
A = BaseModelOutput(lowerCamelCase_ )
A = self.image_processor.post_process_object_detection(
outputs=lowerCamelCase_ ,threshold=lowerCamelCase_ ,target_sizes=model_output["""target_size"""] )[0]
for index in outputs["scores"].nonzero():
A = outputs["""scores"""][index].item()
A = self._get_bounding_box(outputs["""boxes"""][index][0] )
A = {"""score""": score, """label""": label, """box""": box}
results.append(lowerCamelCase_ )
A = sorted(lowerCamelCase_ ,key=lambda lowerCamelCase_ : x["score"] ,reverse=lowerCamelCase_ )
if top_k:
A = results[:top_k]
return results
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
A , A , A , A = box.int().tolist()
A = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
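# Hedged usage sketch: the checkpoint name is the usual OWL-ViT one, but treat it as
# an assumption; any zero-shot object detection checkpoint plugs in the same way.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote control"],
    )
    # Each entry is {"score": ..., "label": ..., "box": {"xmin": ...}}, sorted by
    # score and filtered by the `threshold` kwarg handled in postprocess above.
    print(predictions)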
| 77 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self ,lowerCamelCase_=None ,lowerCamelCase_=None ,**lowerCamelCase_ ) -> Tuple:
A = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,lowerCamelCase_ ,)
A = kwargs.pop("""feature_extractor""" )
A = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowerCamelCase_ ,lowerCamelCase_ )
def __call__( self ,lowerCamelCase_=None ,lowerCamelCase_=None ,lowerCamelCase_=None ,lowerCamelCase_="max_length" ,lowerCamelCase_="np" ,**lowerCamelCase_ ) -> Optional[Any]:
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or (isinstance(lowerCamelCase_ ,lowerCamelCase_ ) and not isinstance(text[0] ,lowerCamelCase_ )):
A = [self.tokenizer(lowerCamelCase_ ,padding=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,**lowerCamelCase_ )]
elif isinstance(lowerCamelCase_ ,lowerCamelCase_ ) and isinstance(text[0] ,lowerCamelCase_ ):
A = []
# Maximum number of queries across batch
A = max([len(lowerCamelCase_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(lowerCamelCase_ ) != max_num_queries:
A = t + [""" """] * (max_num_queries - len(lowerCamelCase_ ))
A = self.tokenizer(lowerCamelCase_ ,padding=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,**lowerCamelCase_ )
encodings.append(lowerCamelCase_ )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
A = np.concatenate([encoding["""input_ids"""] for encoding in encodings] ,axis=0 )
A = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] ,axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
A = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] ,axis=0 )
A = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] ,axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
A = torch.cat([encoding["""input_ids"""] for encoding in encodings] ,dim=0 )
A = torch.cat([encoding["""attention_mask"""] for encoding in encodings] ,dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
A = tf.stack([encoding["""input_ids"""] for encoding in encodings] ,axis=0 )
A = tf.stack([encoding["""attention_mask"""] for encoding in encodings] ,axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
A = BatchEncoding()
A = input_ids
A = attention_mask
if query_images is not None:
A = BatchEncoding()
A = self.image_processor(
lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,**lowerCamelCase_ ).pixel_values
A = query_pixel_values
if images is not None:
A = self.image_processor(lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,**lowerCamelCase_ )
if text is not None and images is not None:
A = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
A = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase_ ) ,tensor_type=lowerCamelCase_ )
def UpperCamelCase__ ( self ,*lowerCamelCase_ ,**lowerCamelCase_ ) -> int:
return self.image_processor.post_process(*lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,*lowerCamelCase_ ,**lowerCamelCase_ ) -> Optional[Any]:
return self.image_processor.post_process_object_detection(*lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,*lowerCamelCase_ ,**lowerCamelCase_ ) -> Optional[Any]:
return self.image_processor.post_process_image_guided_detection(*lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,*lowerCamelCase_ ,**lowerCamelCase_ ) -> List[str]:
return self.tokenizer.batch_decode(*lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,*lowerCamelCase_ ,**lowerCamelCase_ ) -> List[str]:
return self.tokenizer.decode(*lowerCamelCase_ ,**lowerCamelCase_ )
@property
def UpperCamelCase__ ( self ) -> Union[str, Any]:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,lowerCamelCase_ ,)
return self.image_processor_class
@property
def UpperCamelCase__ ( self ) -> Union[str, Any]:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,lowerCamelCase_ ,)
return self.image_processor
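# Hedged usage sketch of the processor above (checkpoint name and image URL assumed):
if __name__ == "__main__":
    import requests
    from PIL import Image

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    # Nested text lists are padded to the max number of queries per image, as in __call__.
    inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
    print(inputs["input_ids"].shape, inputs["pixel_values"].shape)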
| 77 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
a = logging.get_logger(__name__) # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    """simple docstring"""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}.")

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process.")
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators.")

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
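# Hedged usage sketch: "harmonai/maestro-150k" is a known checkpoint for this
# pipeline family, but treat the name (and the 50-step budget) as assumptions.
if __name__ == "__main__":
    pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
    audios = pipe(batch_size=1, num_inference_steps=50, audio_length_in_s=4.0).audios
    print(audios.shape)  # (batch, channels, samples), float numpy array in [-1, 1]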
| 155 |
'''simple docstring'''
import string
def decrypt(message: str) -> None:
    '''simple docstring'''
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    '''simple docstring'''
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
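# Hedged demo (as comments, to avoid printing 26 lines on import): "UIF" is "THE"
# shifted by one, so key #1 in the brute-force table recovers the plaintext.
#   decrypt("UIF")  # prints 'Decryption using Key #1: THE' among the 26 candidates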
| 349 | 0 |
"""simple docstring"""
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
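# Hedged worked example: 100 VA apparent power at power factor 0.8 gives
# P = S * pf = 80 W and Q = S * sqrt(1 - pf^2) = 60 VAR (the 3-4-5 triangle).
assert round(real_power(100, 0.8), 10) == 80.0
assert round(reactive_power(100, 0.8), 10) == 60.0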
| 126 | """simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[str] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Dict = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Dict = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Dict = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[str] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
| 126 | 1 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
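# Hedged usage sketch (shell, as comments): launching a training script on 8 cores.
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
# The launched script must expose an `_mp_fn(index)` entry point for xmp.spawn;
# the script path and flags here are placeholders.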
| 25 |
"""simple docstring"""
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 1 |
def is_arithmetic_series(series: list) -> bool:
    """Check whether a series is arithmetic (constant difference between terms)."""
    if not isinstance(series, list):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''')
    if len(series) == 0:
        raise ValueError('''Input list must be a non empty list''')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the series."""
    if not isinstance(series, list):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''')
    if len(series) == 0:
        raise ValueError('''Input list must be a non empty list''')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
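# Worked example (values are assumptions added for illustration): [2, 4, 6] has a
# common difference of 2, so it is arithmetic, and its mean is (2 + 4 + 6) / 3 = 4.0.
#
#     >>> is_arithmetic_series([2, 4, 6])
#     True
#     >>> arithmetic_mean([2, 4, 6])
#     4.0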
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366 |
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
            ],
        )
    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value('''float''')),
                "references": datasets.Sequence(datasets.Value('''float''')),
            }
        else:
            return {
                "predictions": datasets.Value('''float'''),
                "references": datasets.Value('''float'''),
            }
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 44 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class TransfoXLConfig(PretrainedConfig):
    """Configuration class for the Transformer-XL model."""
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
| 186 |
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return the longest increasing subsequence of `array`."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
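# Quick illustration (input is an assumption added for this sketch): at each level the
# function either keeps the current pivot or restarts from the first smaller element,
# returning whichever recursive answer is longer.
#
#     >>> longest_subsequence([4, 1, 2, 3])
#     [1, 2, 3]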
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318 | 0 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class a :
"""simple docstring"""
@staticmethod
def UpperCAmelCase ( *lowerCAmelCase_ , **lowerCAmelCase_ ) -> str:
pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :Tuple = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
_A = DepthEstimationPipeline(model=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_A = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , lowerCAmelCase_ )
import datasets
_A = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
_A = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
] , lowerCAmelCase_ , )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def UpperCAmelCase ( self ) -> Optional[Any]:
pass
@slow
@require_torch
def UpperCAmelCase ( self ) -> Tuple:
_A = """Intel/dpt-large"""
_A = pipeline("""depth-estimation""" , model=lowerCAmelCase_ )
_A = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
_A = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 )
@require_torch
def UpperCAmelCase ( self ) -> Union[str, Any]:
# This is highly irregular to have no small tests.
self.skipTest("""There is not hf-internal-testing tiny model for either GLPN nor DPT""" )
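# Minimal usage of the pipeline under test, mirroring the slow test above (the model
# name and image URL are the ones already used in this file):
#
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     outputs["depth"]            # PIL.Image.Image depth map
#     outputs["predicted_depth"]  # torch.Tensor of raw predictions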
| 81 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
_SCREAMING_SNAKE_CASE = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Union[PIL.Image.Image, np.ndarray]
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> str:
super().__init__()
self.register_modules(
prior=lowerCAmelCase_ , image_encoder=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , renderer=lowerCAmelCase_ , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
if latents is None:
_A = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
_A = latents.to(lowerCAmelCase_ )
_A = latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase ( self , lowerCAmelCase_=0 ) -> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_A = torch.device(F'''cuda:{gpu_id}''' )
_A = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase_ , lowerCAmelCase_ )
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(lowerCAmelCase_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> Optional[int]:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and isinstance(image[0] , torch.Tensor ):
_A = torch.cat(lowerCAmelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(lowerCAmelCase_ , axis=0 )
if not isinstance(lowerCAmelCase_ , torch.Tensor ):
_A = self.image_processor(lowerCAmelCase_ , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
_A = image.to(dtype=self.image_encoder.dtype , device=lowerCAmelCase_ )
_A = self.image_encoder(lowerCAmelCase_ )["""last_hidden_state"""]
_A = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_A = image_embeds.repeat_interleave(lowerCAmelCase_ , dim=0 )
if do_classifier_free_guidance:
_A = torch.zeros_like(lowerCAmelCase_ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_A = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 25 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 4.0 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = "pil" , lowerCAmelCase_ = True , ) -> List[str]:
if isinstance(lowerCAmelCase_ , PIL.Image.Image ):
_A = 1
elif isinstance(lowerCAmelCase_ , torch.Tensor ):
_A = image.shape[0]
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
_A = len(lowerCAmelCase_ )
else:
raise ValueError(
F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowerCAmelCase_ )}''' )
_A = self._execution_device
_A = batch_size * num_images_per_prompt
_A = guidance_scale > 1.0
_A = self._encode_image(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# prior
self.scheduler.set_timesteps(lowerCAmelCase_ , device=lowerCAmelCase_ )
_A = self.scheduler.timesteps
_A = self.prior.config.num_embeddings
_A = self.prior.config.embedding_dim
_A = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_A = latents.reshape(latents.shape[0] , lowerCAmelCase_ , lowerCAmelCase_ )
for i, t in enumerate(self.progress_bar(lowerCAmelCase_ ) ):
# expand the latents if we are doing classifier free guidance
_A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A = self.scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )
_A = self.prior(
lowerCAmelCase_ , timestep=lowerCAmelCase_ , proj_embedding=lowerCAmelCase_ , ).predicted_image_embedding
# remove the variance
_A , _A = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
_A , _A = noise_pred.chunk(2 )
_A = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_A = self.scheduler.step(
lowerCAmelCase_ , timestep=lowerCAmelCase_ , sample=lowerCAmelCase_ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowerCAmelCase_ )
_A = []
for i, latent in enumerate(lowerCAmelCase_ ):
_A = self.renderer.decode(
latent[None, :] , lowerCAmelCase_ , size=lowerCAmelCase_ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(lowerCAmelCase_ )
_A = torch.stack(lowerCAmelCase_ )
if output_type not in ["np", "pil"]:
raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' )
_A = images.cpu().numpy()
if output_type == "pil":
_A = [self.numpy_to_pil(lowerCAmelCase_ ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowerCAmelCase_ )
| 81 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
'''simple docstring'''
for attribute in key.split('''.''' ):
__UpperCamelCase :str = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
__UpperCamelCase :Any = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
__UpperCamelCase :Union[str, Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__UpperCamelCase :str = value
elif weight_type == "weight_g":
__UpperCamelCase :List[str] = value
elif weight_type == "weight_v":
__UpperCamelCase :str = value
elif weight_type == "bias":
__UpperCamelCase :Union[str, Any] = value
else:
__UpperCamelCase :str = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
'''simple docstring'''
__UpperCamelCase :List[Any] = []
__UpperCamelCase :int = fairseq_model.state_dict()
__UpperCamelCase :List[Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__UpperCamelCase :List[Any] = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == '''group''' , )
__UpperCamelCase :List[str] = True
else:
for key, mapped_key in MAPPING.items():
__UpperCamelCase :Dict = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
__UpperCamelCase :Optional[Any] = True
if "*" in mapped_key:
__UpperCamelCase :List[str] = name.split(SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2]
__UpperCamelCase :Optional[int] = mapped_key.replace('''*''' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
__UpperCamelCase :int = '''weight_g'''
elif "weight_v" in name:
__UpperCamelCase :List[Any] = '''weight_v'''
elif "weight" in name:
__UpperCamelCase :Dict = '''weight'''
elif "bias" in name:
__UpperCamelCase :Dict = '''bias'''
else:
__UpperCamelCase :Dict = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
'''simple docstring'''
__UpperCamelCase :Tuple = full_name.split('''conv_layers.''' )[-1]
__UpperCamelCase :Optional[int] = name.split('''.''' )
__UpperCamelCase :str = int(items[0] )
__UpperCamelCase :List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__UpperCamelCase :Dict = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__UpperCamelCase :Any = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__UpperCamelCase :int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__UpperCamelCase :Union[str, Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
'''simple docstring'''
if config_path is not None:
__UpperCamelCase :Tuple = HubertConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
__UpperCamelCase :Optional[int] = HubertConfig()
if is_finetuned:
if dict_path:
__UpperCamelCase :Optional[int] = Dictionary.load(SCREAMING_SNAKE_CASE )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__UpperCamelCase :Optional[int] = target_dict.pad_index
__UpperCamelCase :Dict = target_dict.bos_index
__UpperCamelCase :str = target_dict.eos_index
__UpperCamelCase :Dict = len(target_dict.symbols )
__UpperCamelCase :List[Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) )
return
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , SCREAMING_SNAKE_CASE )
__UpperCamelCase :Optional[int] = WavaVecaCTCTokenizer(
SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=SCREAMING_SNAKE_CASE , )
__UpperCamelCase :Union[str, Any] = True if config.feat_extract_norm == '''layer''' else False
__UpperCamelCase :Any = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
__UpperCamelCase :Any = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[str] = HubertForCTC(SCREAMING_SNAKE_CASE )
else:
__UpperCamelCase :str = HubertModel(SCREAMING_SNAKE_CASE )
if is_finetuned:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
__UpperCamelCase :Dict = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__lowercase = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
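# Example invocation (all paths are hypothetical placeholders; the flags match the
# argparse definitions above):
#
#     python convert_hubert_checkpoint.py \
#         --checkpoint_path /path/to/hubert_fairseq.pt \
#         --pytorch_dump_folder_path ./hubert-converted \
#         --dict_path /path/to/dict.ltr.txt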
| 43 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
__lowercase = (720, 1280) # Height, Width
__lowercase = (0.4, 0.6) # if height or width lower than this scale, drop it.
__lowercase = 1 / 100
__lowercase = ''''''
__lowercase = ''''''
__lowercase = ''''''
__lowercase = 250
def main():
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase :List[Any] = get_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for index in range(SCREAMING_SNAKE_CASE ):
__UpperCamelCase :Optional[Any] = random.sample(range(len(SCREAMING_SNAKE_CASE ) ) , 4 )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :str = update_image_and_anno(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , filter_scale=SCREAMING_SNAKE_CASE , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__UpperCamelCase :List[Any] = random_chars(32 )
__UpperCamelCase :List[str] = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__UpperCamelCase :Tuple = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(f"""{file_root}.jpg""" , SCREAMING_SNAKE_CASE , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
__UpperCamelCase :Optional[Any] = []
for anno in new_annos:
__UpperCamelCase :int = anno[3] - anno[1]
__UpperCamelCase :Optional[int] = anno[4] - anno[2]
__UpperCamelCase :int = anno[1] + width / 2
__UpperCamelCase :List[str] = anno[2] + height / 2
__UpperCamelCase :str = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(SCREAMING_SNAKE_CASE )
with open(f"""{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def get_dataset(label_dir, img_dir):
'''simple docstring'''
__UpperCamelCase :str = []
__UpperCamelCase :str = []
for label_file in glob.glob(os.path.join(SCREAMING_SNAKE_CASE , '''*.txt''' ) ):
__UpperCamelCase :Any = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(SCREAMING_SNAKE_CASE ) as in_file:
__UpperCamelCase :str = in_file.readlines()
__UpperCamelCase :Optional[int] = os.path.join(SCREAMING_SNAKE_CASE , f"""{label_name}.jpg""" )
__UpperCamelCase :int = []
for obj_list in obj_lists:
__UpperCamelCase :Optional[int] = obj_list.rstrip('''\n''' ).split(''' ''' )
__UpperCamelCase :Any = float(obj[1] ) - float(obj[3] ) / 2
__UpperCamelCase :List[str] = float(obj[2] ) - float(obj[4] ) / 2
__UpperCamelCase :Dict = float(obj[1] ) + float(obj[3] ) / 2
__UpperCamelCase :List[str] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(SCREAMING_SNAKE_CASE )
labels.append(SCREAMING_SNAKE_CASE )
return img_paths, labels
def update_image_and_anno(all_img_list, all_annos, idxs, output_size, scale_range, filter_scale=0.0):
'''simple docstring'''
__UpperCamelCase :List[str] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
__UpperCamelCase :List[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__UpperCamelCase :int = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__UpperCamelCase :Optional[int] = int(scale_x * output_size[1] )
__UpperCamelCase :Any = int(scale_y * output_size[0] )
__UpperCamelCase :List[str] = []
__UpperCamelCase :Dict = []
for i, index in enumerate(SCREAMING_SNAKE_CASE ):
__UpperCamelCase :Any = all_img_list[index]
path_list.append(SCREAMING_SNAKE_CASE )
__UpperCamelCase :Any = all_annos[index]
__UpperCamelCase :Union[str, Any] = cva.imread(SCREAMING_SNAKE_CASE )
if i == 0: # top-left
__UpperCamelCase :str = cva.resize(SCREAMING_SNAKE_CASE , (divid_point_x, divid_point_y) )
__UpperCamelCase :Union[str, Any] = img
for bbox in img_annos:
__UpperCamelCase :Union[str, Any] = bbox[1] * scale_x
__UpperCamelCase :Optional[Any] = bbox[2] * scale_y
__UpperCamelCase :int = bbox[3] * scale_x
__UpperCamelCase :Union[str, Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
__UpperCamelCase :str = cva.resize(SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, divid_point_y) )
__UpperCamelCase :List[str] = img
for bbox in img_annos:
__UpperCamelCase :str = scale_x + bbox[1] * (1 - scale_x)
__UpperCamelCase :Dict = bbox[2] * scale_y
__UpperCamelCase :Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
__UpperCamelCase :List[Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
__UpperCamelCase :str = cva.resize(SCREAMING_SNAKE_CASE , (divid_point_x, output_size[0] - divid_point_y) )
__UpperCamelCase :Optional[int] = img
for bbox in img_annos:
__UpperCamelCase :Tuple = bbox[1] * scale_x
__UpperCamelCase :Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
__UpperCamelCase :Tuple = bbox[3] * scale_x
__UpperCamelCase :Dict = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
__UpperCamelCase :Optional[int] = cva.resize(
SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
__UpperCamelCase :Optional[int] = img
for bbox in img_annos:
__UpperCamelCase :Optional[Any] = scale_x + bbox[1] * (1 - scale_x)
__UpperCamelCase :Optional[int] = scale_y + bbox[2] * (1 - scale_y)
__UpperCamelCase :Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
__UpperCamelCase :int = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
__UpperCamelCase :List[Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def random_chars(number_char):
'''simple docstring'''
assert number_char > 1, "The number of character should greater than 1"
__UpperCamelCase :Optional[Any] = ascii_lowercase + digits
return "".join(random.choice(SCREAMING_SNAKE_CASE ) for _ in range(SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 43 | 1 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85) -> None:
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)
    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"""Duplicate key {code_key}""")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)
    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters
    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=1_0000), chunksize=100, ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Compute a MinHash per snippet, then feed them sequentially into the index."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Call _find_cluster_extremes_shared in a pool, sharing the dataset as a global."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list), total=len(cluster_list), ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(F"""Original dataset size: {len(dataset)}""")
    print(F"""Number of duplicate clusters: {len(duplicate_clusters)}""")
    print(F"""Files in duplicate cluster: {len(duplicate_indices)}""")
    print(F"""Unique files in duplicate cluster: {len(extreme_dict)}""")
    print(F"""Filtered dataset size: {len(ds_filter)}""")
    return ds_filter, duplicate_clusters
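# Sketch of intended usage (the dataset name is a placeholder assumption, not from
# the source; `deduplicate_dataset` is the entry point defined above):
#
#     from datasets import load_dataset
#     ds = load_dataset("org/code-dataset", split="train")
#     ds_filter, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)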
| 355 |
"""simple docstring"""
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """Return the minimal path cost from top-left to bottom-right, moving right or down."""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
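# Worked example (matrix values are assumptions added for illustration): for
#     [[1, 3, 1],
#      [1, 5, 1],
#      [4, 2, 1]]
# the cheapest monotone path is 1 -> 3 -> 1 -> 1 -> 1, so min_path_sum(...) returns 7.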
if __name__ == "__main__":
import doctest
doctest.testmod()
| 79 | 0 |
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index
def solution(n: int = 1_000) -> int:
    return fibonacci_digits_index(n)
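# Worked example (derived from the definitions above, not from the original file):
# with sequence = [0, 1, 1, 2, ...], fibonacci(12) == 144 is the first term with
# three digits, so fibonacci_digits_index(3) == 12 and solution(3) == 12.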
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 29 |
def solution(limit: int = 1_000_000) -> int:
    # sieve of Euler's totient: phi[i] starts at i - 1 and is reduced for each prime factor
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
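# Sanity check (the small case comes from Project Euler 72): solution(8) should be 21,
# i.e. phi(2) + ... + phi(8) = 1 + 2 + 2 + 4 + 2 + 6 + 4, the number of reduced
# proper fractions with denominator at most 8.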
if __name__ == "__main__":
print(solution())
| 92 | 0 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the validation metric."""
    if metric == "rouge2":
        exp = """{val_avg_rouge2:.4f}-{step_count}"""
    elif metric == "bleu":
        exp = """{val_avg_bleu:.4f}-{step_count}"""
    elif metric == "em":
        exp = """{val_avg_em:.4f}-{step_count}"""
    elif metric == "loss":
        exp = """{val_avg_loss:.4f}-{step_count}"""
    else:
        raise NotImplementedError(
            F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
            """ function.""")
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=F'''val_{metric}''', mode="""max""", save_top_k=1, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=F'''val_{metric}''', mode="""min""" if """loss""" in metric else """max""", patience=patience, verbose=True, )
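# Sketch of how these helpers are typically wired together (the trainer setup is an
# assumption added for illustration, not from the original file):
#
#     callbacks = [
#         get_checkpoint_callback(output_dir, "rouge2"),
#         get_early_stopping_callback("rouge2", patience=3),
#         Seq2SeqLoggingCallback(),
#     ]
#     trainer = pl.Trainer(callbacks=callbacks)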
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f'''lr_group_{i}''': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''')
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / """test_results.txt"""
            generations_file = od / """test_generations.txt"""
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
            generations_file = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, """a+""") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f'''{key}: {val:.6f}\n'''
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = """\n""".join(metrics["""preds"""])
            generations_file.open("""w+""").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1e6, """grad_mp""": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, """test""")
    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 371 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = tempfile.mkdtemp()
# fmt: off
A_ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
A_ = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
A_ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
A_ = {"""unk_token""": """<unk>"""}
A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCamelCase__ ) )
A_ = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
A_ = os.path.join(self.tmpdirname , UpperCamelCase__ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , **UpperCamelCase__ ) -> str:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def snake_case_ ( self , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def snake_case_ ( self , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = self.get_tokenizer()
A_ = self.get_rust_tokenizer()
A_ = self.get_image_processor()
A_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
A_ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase__ )
A_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
A_ = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase__ )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase__ )
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
A_ = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
A_ = self.prepare_image_inputs()
A_ = image_processor(UpperCamelCase__ , return_tensors="""np""" )
A_ = processor(images=UpperCamelCase__ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
A_ = """lower newer"""
A_ = processor(text=UpperCamelCase__ )
A_ = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
A_ = """lower newer"""
A_ = self.prepare_image_inputs()
A_ = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ = processor.batch_decode(UpperCamelCase__ )
A_ = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
A_ = """lower newer"""
A_ = self.prepare_image_inputs()
A_ = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 101 | 0 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__lowerCAmelCase = logging.getLogger(__name__)
def load_and_quantize_model(model, bnb_quantization_config, weights_location=None, device_map=None, no_split_module_classes=None, max_memory=None, offload_folder=None, offload_state_dict=False, ):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            'make sure you have the latest version of `bitsandbytes` installed.' )
lowercase__: Any = []
# custom device map
if isinstance(snake_case , snake_case ) and len(device_map.keys() ) > 1:
lowercase__: Optional[Any] = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowercase__: Any = get_keys_to_not_convert(snake_case )
# add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
bnb_quantization_config.skip_modules.extend(snake_case )
lowercase__: Dict = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowercase__: int = []
lowercase__: Union[str, Any] = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(snake_case )
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
lowercase__: Optional[int] = get_parameter_device(snake_case )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
lowercase__: List[str] = replace_with_bnb_layers(snake_case , snake_case , modules_to_not_convert=snake_case )
# convert param to the right dtype
lowercase__: Union[str, Any] = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
lowercase__: Dict = name.replace('.weight' , '' ).replace('.bias' , '' )
lowercase__: Dict = getattr(snake_case , snake_case , snake_case )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(snake_case ):
param.to(snake_case )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
f'The model device type is {model_device.type}. However, cuda is needed for quantization.'
'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
f'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' )
else:
with init_empty_weights():
lowercase__: Optional[int] = replace_with_bnb_layers(
snake_case , snake_case , modules_to_not_convert=snake_case )
lowercase__: Optional[int] = get_quantized_model_device_map(
snake_case , snake_case , snake_case , max_memory=snake_case , no_split_module_classes=snake_case , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowercase__: int = True
lowercase__: Optional[int] = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
snake_case , snake_case , snake_case , dtype=bnb_quantization_config.torch_dtype , offload_folder=snake_case , offload_state_dict=snake_case , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(snake_case , device_map=snake_case , offload_dir=snake_case )
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")
    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'." )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            } )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            } )
        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model, low_zero=(device_map == "balanced_low_0"), max_memory=max_memory, **kwargs)
        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)
    if isinstance(device_map, dict):
        # check that we don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name)
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug." )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features, module.out_features, module.bias is not None, has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold)
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_4bit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, quant_type=bnb_quantization_config.bnb_4bit_quant_type)
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name)
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
def has_4bit_bnb_layers(model):
    # Check if the model contains any `bnb.nn.Linear4bit` layers
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
    tensor_name = param_name
    module = model
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]
    # offload weights
    module._parameters[tensor_name].requires_grad = False
    offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
    if hasattr(module._parameters[tensor_name], "SCB"):
        offload_weight(
            module._parameters[tensor_name].SCB, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)
    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
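# --- Illustrative addition (not part of the original module): a minimal, self-contained
# sketch of the recursive nn.Linear-swapping pattern that _replace_with_bnb_layers above
# implements. Plain torch only, so it runs without bitsandbytes; `factory` and `skip`
# are hypothetical stand-ins for the bnb-specific construction and the skip-module list.
import torch.nn as nn
def replace_linear_layers(model: nn.Module, factory, skip=()) -> nn.Module:
    # Recursively swap every nn.Linear child (except skipped names) with factory(old_linear).
    for name, child in model.named_children():
        if isinstance(child, nn.Linear) and name not in skip:
            setattr(model, name, factory(child))
        else:
            replace_linear_layers(child, factory, skip)
    return model
# Example: cast every linear layer to half precision except the final head ("2").
_toy = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
replace_linear_layers(_toy, factory=lambda lin: lin.half(), skip=("2",))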
| 196 |
import math
def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a
def fx_derivative(x: float) -> float:
    return 2 * x
def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start
def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
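# --- Illustrative addition: a quick sanity check of the Newton iteration above against
# math.sqrt (uses the function names as restored here).
if __name__ == "__main__":
    for n in (2.0, 10.0, 800.0):
        approx = square_root_iterative(n)
        assert abs(approx - math.sqrt(n)) < 1e-9, (n, approx)
    print("square_root_iterative matches math.sqrt")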
| 196 | 1 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Dict = logging.get_logger(__name__)
_snake_case : int = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"
def __init__( self : Dict , lowerCamelCase : Optional[int]=32 , lowerCamelCase : int=768 , lowerCamelCase : Any=12 , lowerCamelCase : List[str]=12 , lowerCamelCase : Tuple=3072 , lowerCamelCase : Dict="gelu" , lowerCamelCase : List[Any]=0.1 , lowerCamelCase : List[str]=0.1 , lowerCamelCase : List[Any]=0.1 , lowerCamelCase : Any=0.0 , lowerCamelCase : str=0.1 , lowerCamelCase : str=0.1 , lowerCamelCase : Any=0.02 , lowerCamelCase : int=1E-5 , lowerCamelCase : Optional[Any]="gelu" , lowerCamelCase : Optional[int]=(512, 512, 512, 512, 512, 512, 512) , lowerCamelCase : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , lowerCamelCase : Optional[Any]=(10, 3, 3, 3, 3, 2, 2) , lowerCamelCase : Optional[int]=False , lowerCamelCase : Any=16 , lowerCamelCase : int=19 , lowerCamelCase : str=5 , lowerCamelCase : List[str]=0.05 , lowerCamelCase : Tuple=10 , lowerCamelCase : List[str]=2 , lowerCamelCase : int=0.0 , lowerCamelCase : Any=10 , lowerCamelCase : int=0 , lowerCamelCase : str="sum" , lowerCamelCase : Optional[Any]=False , lowerCamelCase : Optional[Any]=False , lowerCamelCase : Optional[Any]=256 , lowerCamelCase : Optional[Any]=(512, 512, 512, 512, 1500) , lowerCamelCase : List[Any]=(5, 3, 3, 1, 1) , lowerCamelCase : Union[str, Any]=(1, 2, 3, 1, 1) , lowerCamelCase : Tuple=512 , lowerCamelCase : List[str]=0 , lowerCamelCase : str=1 , lowerCamelCase : List[Any]=2 , lowerCamelCase : List[str]=False , lowerCamelCase : int=3 , lowerCamelCase : str=2 , lowerCamelCase : Optional[int]=3 , lowerCamelCase : Any=None , **lowerCamelCase : Dict , ) -> Tuple:
super().__init__(**_snake_case , pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case )
__snake_case : int = hidden_size
__snake_case : List[str] = feat_extract_activation
__snake_case : Union[str, Any] = list(_snake_case )
__snake_case : Union[str, Any] = list(_snake_case )
__snake_case : Optional[int] = list(_snake_case )
__snake_case : Union[str, Any] = conv_bias
__snake_case : Union[str, Any] = num_conv_pos_embeddings
__snake_case : Union[str, Any] = num_conv_pos_embedding_groups
__snake_case : Dict = conv_pos_kernel_size
__snake_case : List[Any] = len(self.conv_dim )
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : Union[str, Any] = intermediate_size
__snake_case : Optional[int] = hidden_act
__snake_case : List[str] = num_attention_heads
__snake_case : Optional[int] = hidden_dropout
__snake_case : Any = attention_dropout
__snake_case : Optional[Any] = activation_dropout
__snake_case : Any = feat_proj_dropout
__snake_case : Optional[Any] = final_dropout
__snake_case : str = layerdrop
__snake_case : Union[str, Any] = layer_norm_eps
__snake_case : List[Any] = initializer_range
__snake_case : int = vocab_size
__snake_case : List[Any] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case : List[str] = mask_time_prob
__snake_case : str = mask_time_length
__snake_case : Optional[int] = mask_time_min_masks
__snake_case : Optional[int] = mask_feature_prob
__snake_case : Any = mask_feature_length
__snake_case : Optional[Any] = mask_feature_min_masks
# ctc loss
__snake_case : Any = ctc_loss_reduction
__snake_case : Optional[Any] = ctc_zero_infinity
# adapter
__snake_case : str = add_adapter
__snake_case : Union[str, Any] = adapter_kernel_size
__snake_case : Optional[Any] = adapter_stride
__snake_case : List[Any] = num_adapter_layers
__snake_case : str = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__snake_case : Dict = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__snake_case : int = list(_snake_case )
__snake_case : Tuple = list(_snake_case )
__snake_case : Any = list(_snake_case )
__snake_case : Dict = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
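# --- Illustrative addition: inputs_to_logits_ratio above is just the product of the
# conv strides. With the default strides (5, 2, 2, 2, 2, 2, 2) this is 320, i.e. one
# encoder frame per 320 waveform samples (a 20 ms hop at a 16 kHz sampling rate).
import math as _math
assert _math.prod((5, 2, 2, 2, 2, 2, 2)) == 320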
| 351 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
_snake_case : Optional[Any] = "Create a default config file for Accelerate with only a few flags set."
def lowerCAmelCase_ ( __lowerCamelCase="no" , __lowerCamelCase = default_json_config_file , __lowerCamelCase = False ):
__snake_case : int = Path(__lowerCamelCase )
path.parent.mkdir(parents=__lowerCamelCase , exist_ok=__lowerCamelCase )
if path.exists():
print(
F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
return False
__snake_case : Any = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
__snake_case : Optional[int] = {
"compute_environment": "LOCAL_MACHINE",
"mixed_precision": mixed_precision,
}
if torch.cuda.is_available():
__snake_case : Dict = torch.cuda.device_count()
__snake_case : Tuple = num_gpus
__snake_case : List[str] = False
if num_gpus > 1:
__snake_case : Optional[int] = "MULTI_GPU"
else:
__snake_case : Dict = "NO"
elif is_xpu_available() and use_xpu:
__snake_case : List[str] = torch.xpu.device_count()
__snake_case : str = num_xpus
__snake_case : int = False
if num_xpus > 1:
__snake_case : Optional[int] = "MULTI_XPU"
else:
__snake_case : str = "NO"
elif is_npu_available():
__snake_case : Any = torch.npu.device_count()
__snake_case : str = num_npus
__snake_case : str = False
if num_npus > 1:
__snake_case : Optional[int] = "MULTI_NPU"
else:
__snake_case : int = "NO"
else:
__snake_case : List[Any] = 0
__snake_case : Dict = True
__snake_case : Tuple = 1
__snake_case : Tuple = "NO"
__snake_case : str = ClusterConfig(**__lowerCamelCase )
config.to_json_file(__lowerCamelCase )
return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file", default=default_json_config_file, help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ), dest="save_location")
    parser.add_argument(
        "--mixed_precision", choices=["no", "fp16", "bf16"], type=str, help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.", default="no")
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
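# --- Illustrative addition: a sketch of the kind of JSON write_basic_config produces on
# a single-GPU machine. The exact key set of the real ClusterConfig file is an
# assumption for illustration; only keys assigned above are shown.
import json
if __name__ == "__main__":
    example_config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": "no",
        "distributed_type": "NO",
        "num_processes": 1,
        "use_cpu": False,
    }
    Path("/tmp/example_default_config.json").write_text(json.dumps(example_config, indent=2))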
| 134 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                } )
        } )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
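# --- Illustrative addition: a SQuAD-style record matching the extractive-QA schema
# above. Plain dicts, so it runs without the datasets library; the sentence is made up.
sample = {
    "question": "Where is the Eiffel Tower?",
    "context": "The Eiffel Tower is in Paris.",
    "answers": {"text": ["Paris"], "answer_start": [23]},
}
# answer_start indexes into the context string:
_start = sample["answers"]["answer_start"][0]
assert sample["context"][_start : _start + len("Paris")] == "Paris"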
| 109 |
"""simple docstring"""
from math import isqrt, log2
def calculate_prime_numbers(max_number: int) -> list[int]:
    # Sieve of Eratosthenes: return all primes below max_number.
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(base: int = 800800, degree: int = 800800) -> int:
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 109 | 1 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
@unittest.skip("ImageGPT requires clusters at initialization" )
def UpperCAmelCase_ ( self )-> str:
pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")
    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])
    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1_024))
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)
        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1_024))
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
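# --- Illustrative addition: how ImageGPT-style color clustering turns pixels into token
# ids — each (r, g, b) value is assigned to its nearest cluster. A toy two-cluster
# palette is used here; the real processor ships a much larger (512-entry) palette.
import numpy as _np
_clusters = _np.array([[0.9, 0.7, 0.4], [-0.6, 0.0, 0.5]])   # (n_clusters, 3)
_pixels = _np.array([[0.8, 0.6, 0.5], [-0.5, 0.1, 0.4]])     # (n_pixels, 3), scaled to [-1, 1]
_dists = ((_pixels[:, None, :] - _clusters[None, :, :]) ** 2).sum(-1)
assert _dists.argmin(-1).tolist() == [0, 1]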
| 60 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]])
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]])
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True)
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
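# --- Illustrative addition: the core move inside read_in_k_v above — a fused key/value
# projection matrix is split row-wise into separate key and value weights. Toy sizes only.
import torch as _torch
_hidden = 4
_kv_weight = _torch.arange(2 * _hidden * _hidden, dtype=_torch.float32).reshape(2 * _hidden, _hidden)
_k_weight, _v_weight = _kv_weight[:_hidden, :], _kv_weight[_hidden:, :]
assert _k_weight.shape == _v_weight.shape == (_hidden, _hidden)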
| 60 | 1 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates)
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1)
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False
    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)
    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)
    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2)
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words
    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
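# --- Illustrative addition: the attention-mask construction used by
# prepare_mbart_inputs_dict above, on a concrete batch (MBart pads with token id 1).
import tensorflow as _tf
_pad_token_id = 1
_ids = _tf.constant([[5, 42, 7, _pad_token_id, _pad_token_id]])
_mask = _tf.cast(_tf.math.not_equal(_ids, _pad_token_id), _tf.int8)
assert _mask.numpy().tolist() == [[1, 1, 1, 0, 0]]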
| 20 |
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time with each process's burst time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes remain, any process whose arrival time has passed and that still
    # has remaining execution time is put into ready_process; the shortest process in
    # ready_process (target_process) is then executed to completion.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
| 301 | 0 |
"""simple docstring"""
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]
        if len(sources) == 0 or len(sinks) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])
            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()
    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False
    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True
    # You should override this in concrete algorithm classes.
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1
    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count
    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index])
    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)
            self.relabel(vertex_index)
    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index], self.graph[from_index][to_index] - self.preflow[from_index][to_index])
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta
    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(f'''maximum flow is {maximum_flow}''')
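# --- Illustrative addition: push-relabel on the smallest interesting network. The single
# s->a edge has capacity 5 and a->t has capacity 3, so the maximum flow is 3.
tiny_graph = [[0, 5, 0], [0, 0, 3], [0, 0, 0]]
tiny_network = FlowNetwork(tiny_graph, [0], [2])
tiny_network.set_maximum_flow_algorithm(PushRelabelExecutor)
assert tiny_network.find_maximum_flow() == 3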
| 367 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCAmelCase_ : Union[str, Any] = abspath(join(dirname(dirname(dirname(__file__))), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 318 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCamelCase : List[str] = logging.get_logger(__name__)
lowerCamelCase : List[str] = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=50400, n_ctx=2048, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer
    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
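# --- Illustrative addition: the shape of the dummy past_key_values built in
# generate_dummy_inputs above, using the CodeGen defaults (28 layers, 16 heads,
# hidden size 4096 -> head_dim 256). Plain torch, no transformers required.
import torch
_batch, _seqlen = 2, 7
_num_layers, _num_heads, _hidden_size = 28, 16, 4096
_past_shape = (_batch, _num_heads, _seqlen + 2, _hidden_size // _num_heads)
_past_key_values = [(torch.zeros(_past_shape), torch.zeros(_past_shape)) for _ in range(_num_layers)]
assert len(_past_key_values) == _num_layers and _past_key_values[0][0].shape == _past_shape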
| 2 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"
    def __init__(self, num_channels=3, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], embed_dim=[64, 192, 384], num_heads=[1, 3, 6], depth=[1, 2, 10], mlp_ratio=[4.0, 4.0, 4.0], attention_drop_rate=[0.0, 0.0, 0.0], drop_rate=[0.0, 0.0, 0.0], drop_path_rate=[0.0, 0.0, 0.1], qkv_bias=[True, True, True], cls_token=[False, False, True], qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"], kernel_qkv=[3, 3, 3], padding_kv=[1, 1, 1], stride_kv=[2, 2, 2], padding_q=[1, 1, 1], stride_q=[1, 1, 1], initializer_range=0.02, layer_norm_eps=1e-12, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
        _A = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.int32 , )  # "J'aime le camembert !"
_A = model(__UpperCAmelCase )["last_hidden_state"]
_A = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , __UpperCAmelCase )
# compare the actual values for a slice.
_A = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
        # expected_slice = camembert.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 174 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
lowerCamelCase_ = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
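# --- Editor's illustration (not part of the snippet above) ---------------------
# The _LazyModule registration above defers the heavy torch imports until an
# attribute is first accessed. A simplified sketch of the same idea via a
# PEP 562 module-level __getattr__; the real _LazyModule also handles
# submodule access, dir(), and missing-dependency errors.
import importlib

_lazy_exports = {"processing_trocr": ["TrOCRProcessor"]}  # submodule -> public names

def __getattr__(name):
    for submodule, names in _lazy_exports.items():
        if name in names:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")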
| 174 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : int = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowerCamelCase : Dict = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase : str = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase : Union[str, Any] = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowerCamelCase : int = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
lowerCamelCase : Union[str, Any] = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
lowerCamelCase : Union[str, Any] = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
lowerCamelCase : List[Any] = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
lowerCamelCase : Optional[int] = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
lowerCamelCase : Union[str, Any] = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class A( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class A( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : Union[str, Any] = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
lowerCamelCase : List[Any] = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowerCamelCase : List[str] = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(__SCREAMING_SNAKE_CASE )
class A:
'''simple docstring'''
def __call__( self : Optional[int] , A_ : str , A_ : Optional[str] = None , A_ : Optional[str] = None , A_ : Union[bool, str] = False , A_ : Union[bool, str] = False , A_ : Optional[int] = None , A_ : Optional[Union[str, TensorType]] = None , A_ : Optional[bool] = None , **A_ : Any , ) -> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , **UpperCAmelCase_ , )
elif titles is None or texts is None:
lowerCamelCase_ = titles if texts is None else texts
return super().__call__(
UpperCAmelCase_ , UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCamelCase_ = titles if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else [titles]
lowerCamelCase_ = texts if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else [texts]
lowerCamelCase_ = len(UpperCAmelCase_ )
lowerCamelCase_ = questions if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else [questions] * n_passages
if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ):
raise ValueError(
f"""There should be as many titles than texts but got {len(UpperCAmelCase_ )} titles and {len(UpperCAmelCase_ )} texts.""" )
lowerCamelCase_ = super().__call__(UpperCAmelCase_ , UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ )["input_ids"]
lowerCamelCase_ = super().__call__(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ )["input_ids"]
lowerCamelCase_ = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(UpperCAmelCase_ , UpperCAmelCase_ )
]
}
if return_attention_mask is not False:
lowerCamelCase_ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCamelCase_ = attention_mask
return self.pad(UpperCAmelCase_ , padding=UpperCAmelCase_ , max_length=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ )
def a__ ( self : List[Any] , A_ : BatchEncoding , A_ : DPRReaderOutput , A_ : int = 16 , A_ : int = 64 , A_ : int = 4 , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
lowerCamelCase_ = reader_input["input_ids"]
lowerCamelCase_ = reader_output[:3]
lowerCamelCase_ = len(UpperCAmelCase_ )
lowerCamelCase_ = sorted(range(UpperCAmelCase_ ) , reverse=UpperCAmelCase_ , key=relevance_logits.__getitem__ )
lowerCamelCase_ = []
for doc_id in sorted_docs:
lowerCamelCase_ = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCamelCase_ = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCamelCase_ = sequence_ids.index(self.pad_token_id )
else:
lowerCamelCase_ = len(UpperCAmelCase_ )
lowerCamelCase_ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCAmelCase_ , top_spans=UpperCAmelCase_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCAmelCase_ , start_index=UpperCAmelCase_ , end_index=UpperCAmelCase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(UpperCAmelCase_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def a__ ( self : Union[str, Any] , A_ : List[int] , A_ : List[int] , A_ : int , A_ : int , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
lowerCamelCase_ = []
for start_index, start_score in enumerate(UpperCAmelCase_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowerCamelCase_ = sorted(UpperCAmelCase_ , key=lambda A_ : x[1] , reverse=UpperCAmelCase_ )
lowerCamelCase_ = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
lowerCamelCase_ = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(UpperCAmelCase_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class A( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = READER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = ['''input_ids''', '''attention_mask''']
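# --- Editor's illustration (not part of the snippet above; hypothetical names) -
# Standalone sketch of the span selection in _get_best_spans above: score every
# (start, end) pair within the length budget, sort by score, then greedily keep
# spans that do not overlap anything already chosen.
def pick_best_spans(start_logits, end_logits, max_answer_length=5, top_spans=2):
    scored = []
    for start, start_score in enumerate(start_logits):
        for length, end_score in enumerate(end_logits[start : start + max_answer_length]):
            scored.append(((start, start + length), start_score + end_score))
    scored.sort(key=lambda pair: pair[1], reverse=True)
    chosen = []
    for (start, end), _score in scored:
        # drop any span overlapping a previously kept (higher-scoring) span
        if any(not (end < s or e < start) for s, e in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen

assert pick_best_spans([0.1, 2.0, 0.3], [0.2, 0.1, 1.5]) == [(1, 2), (0, 0)]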
| 204 |
from typing import Any
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> list:
"""simple docstring"""
_validation(
__a , __a , __a , __a , __a , )
# Creates data structures and fill initial step
lowerCamelCase__: dict ={}
lowerCamelCase__: dict ={}
for state in states_space:
lowerCamelCase__: Optional[Any] =observations_space[0]
lowerCamelCase__: List[Any] =(
initial_probabilities[state] * emission_probabilities[state][observation]
)
lowerCamelCase__: int =None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__a ) ):
lowerCamelCase__: Tuple =observations_space[o]
lowerCamelCase__: Optional[Any] =observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
lowerCamelCase__: Tuple =""
lowerCamelCase__: Optional[Any] =-1
for k_state in states_space:
lowerCamelCase__: int =(
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
lowerCamelCase__: List[str] =probability
lowerCamelCase__: int =k_state
# Update probabilities and pointers dicts
lowerCamelCase__: Any =(
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
lowerCamelCase__: int =arg_max
# The final observation
lowerCamelCase__: Any =observations_space[len(__a ) - 1]
# argmax for given final observation
lowerCamelCase__: Optional[Any] =""
lowerCamelCase__: int =-1
for k_state in states_space:
lowerCamelCase__: Tuple =probabilities[(k_state, final_observation)]
if probability > max_probability:
lowerCamelCase__: List[Any] =probability
lowerCamelCase__: Dict =k_state
lowerCamelCase__: str =arg_max
# Process pointers backwards
lowerCamelCase__: Union[str, Any] =last_state
lowerCamelCase__: List[str] =[]
for o in range(len(__a ) - 1 , -1 , -1 ):
result.append(__a )
lowerCamelCase__: Union[str, Any] =pointers[previous, observations_space[o]]
result.reverse()
return result
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> None:
"""simple docstring"""
_validate_not_empty(
__a , __a , __a , __a , __a , )
_validate_lists(__a , __a )
_validate_dicts(
__a , __a , __a )
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> None:
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
_validate_list(__a , "observations_space" )
_validate_list(__a , "states_space" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
if not isinstance(_object , __a ):
lowerCamelCase__: Tuple =F"""{var_name} must be a list"""
raise ValueError(__a )
else:
for x in _object:
if not isinstance(__a , __a ):
lowerCamelCase__: str =F"""{var_name} must be a list of strings"""
raise ValueError(__a )
def lowerCAmelCase_ ( __a , __a , __a , ) -> None:
"""simple docstring"""
_validate_dict(__a , "initial_probabilities" , __a )
_validate_nested_dict(__a , "transition_probabilities" )
_validate_nested_dict(__a , "emission_probabilities" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
_validate_dict(_object , __a , __a )
for x in _object.values():
_validate_dict(__a , __a , __a , __a )
def lowerCAmelCase_ ( __a , __a , __a , __a = False ) -> None:
"""simple docstring"""
if not isinstance(_object , __a ):
lowerCamelCase__: Optional[int] =F"""{var_name} must be a dict"""
raise ValueError(__a )
if not all(isinstance(__a , __a ) for x in _object ):
lowerCamelCase__: Tuple =F"""{var_name} all keys must be strings"""
raise ValueError(__a )
if not all(isinstance(__a , __a ) for x in _object.values() ):
lowerCamelCase__: Dict ="nested dictionary " if nested else ""
lowerCamelCase__: List[str] =F"""{var_name} {nested_text}all values must be {value_type.__name__}"""
raise ValueError(__a )
if __name__ == "__main__":
from doctest import testmod
testmod()
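# --- Editor's illustration (not part of the snippet above) ---------------------
# Hypothetical usage of the Viterbi decoder above (its real name is obfuscated
# in this dump; read `viterbi` as the first definition). Classic healthy/fever
# hidden Markov model:
observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
start_p = {"Healthy": 0.6, "Fever": 0.4}
trans_p = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emit_p = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
# viterbi(observations, states, start_p, trans_p, emit_p)
# -> ["Healthy", "Healthy", "Fever"]  (most likely hidden state sequence)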
| 10 | 0 |
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
if collection == []:
return []
# get some information about the collection
UpperCamelCase__ = len(UpperCamelCase__ )
UpperCamelCase__ = max(UpperCamelCase__ )
UpperCamelCase__ = min(UpperCamelCase__ )
# create the counting array
UpperCamelCase__ = coll_max + 1 - coll_min
UpperCamelCase__ = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with it's predecessors. now, counting_arr[i] tells
# us how many elements <= i has in the collection
for i in range(1, UpperCamelCase__ ):
UpperCamelCase__ = counting_arr[i] + counting_arr[i - 1]
# create the output collection
UpperCamelCase__ = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0, UpperCamelCase__ ) ):
UpperCamelCase__ = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def lowerCamelCase_ ( UpperCamelCase__ : Dict ):
'''simple docstring'''
return "".join([chr(UpperCamelCase__ ) for i in counting_sort([ord(UpperCamelCase__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
lowercase = input("""Enter numbers separated by a comma:\n""").strip()
lowercase = [int(item) for item in user_input.split(""",""")]
print(counting_sort(unsorted))
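# --- Editor's worked example (not part of the snippet above) -------------------
# Trace of the counting pass for the input [4, 1, 3, 1]:
#   coll_min = 1, coll_max = 4  -> counting array of length 4
#   raw counts   [2, 0, 1, 1]   (two 1s, no 2s, one 3, one 4)
#   prefix sums  [2, 2, 3, 4]   (counting_arr[i] = number of elements <= i + coll_min)
# Walking the input right to left and placing each element at
# counting_arr[value - coll_min] - 1 is what keeps equal elements in their
# original order, i.e. makes the sort stable. With `counting_sort` standing in
# for the obfuscated first definition: counting_sort([4, 1, 3, 1]) -> [1, 1, 3, 4]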
 | 357 |
from __future__ import annotations
lowercase = list[list[int]]
# assigning initial values to the grid
lowercase = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
lowercase = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def lowerCamelCase_ ( UpperCamelCase__ : Matrix, UpperCamelCase__ : int, UpperCamelCase__ : int, UpperCamelCase__ : int ):
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def lowerCamelCase_ ( UpperCamelCase__ : Matrix ):
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def lowerCamelCase_ ( UpperCamelCase__ : Matrix ):
'''simple docstring'''
if location := find_empty_location(UpperCamelCase__ ):
UpperCamelCase__ , UpperCamelCase__ = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1, 10 ):
if is_safe(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
UpperCamelCase__ = digit
if sudoku(UpperCamelCase__ ) is not None:
return grid
UpperCamelCase__ = 0
return None
def lowerCamelCase_ ( UpperCamelCase__ : Matrix ):
'''simple docstring'''
for row in grid:
for cell in row:
print(UpperCamelCase__, end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 2_0)
print_solution(example_grid)
print("""\nExample grid solution:""")
lowercase = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 35 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : List[str] = logging.get_logger(__name__)
lowercase : int = {
'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
'BridgeTower/bridgetower-base-itm-mlm': (
'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
),
}
class A ( __snake_case ):
__magic_name__ = '''bridgetower_vision_model'''
def __init__( self , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=288 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=1e-05 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , **SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
A : List[str] = hidden_size
A : Optional[int] = num_hidden_layers
A : Dict = num_channels
A : Tuple = patch_size
A : Optional[Any] = image_size
A : Dict = initializer_factor
A : List[Any] = layer_norm_eps
A : Optional[int] = stop_gradient
A : Optional[Any] = share_layernorm
A : Optional[int] = remove_last_layer
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig":
"""simple docstring"""
A, A : Any = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if config_dict.get('''model_type''' ) == "bridgetower":
            A : List[Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class A ( __snake_case ):
__magic_name__ = '''bridgetower_text_model'''
def __init__( self , SCREAMING_SNAKE_CASE=50265 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=514 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=1e-05 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
A : List[Any] = vocab_size
A : int = hidden_size
A : List[Any] = num_hidden_layers
A : Any = num_attention_heads
A : Any = hidden_act
A : Union[str, Any] = initializer_factor
A : Dict = intermediate_size
A : Dict = hidden_dropout_prob
A : List[str] = attention_probs_dropout_prob
A : Any = max_position_embeddings
A : int = type_vocab_size
A : List[Any] = layer_norm_eps
A : List[Any] = position_embedding_type
A : List[Any] = use_cache
A : List[str] = pad_token_id
A : Dict = bos_token_id
A : Tuple = eos_token_id
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig":
"""simple docstring"""
A, A : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if config_dict.get('''model_type''' ) == "bridgetower":
A : List[Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class A ( __snake_case ):
__magic_name__ = '''bridgetower'''
def __init__( self , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=1e-05 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="add" , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Tuple:
"""simple docstring"""
A : Union[str, Any] = kwargs.pop('''text_config_dict''' , SCREAMING_SNAKE_CASE )
A : Dict = kwargs.pop('''vision_config_dict''' , SCREAMING_SNAKE_CASE )
super().__init__(**SCREAMING_SNAKE_CASE )
A : int = share_cross_modal_transformer_layers
A : str = hidden_act
A : List[Any] = hidden_size
A : List[Any] = initializer_factor
A : List[Any] = layer_norm_eps
A : Optional[Any] = share_link_tower_layers
A : Optional[Any] = link_tower_type
A : Union[str, Any] = num_attention_heads
A : str = num_hidden_layers
A : int = tie_word_embeddings
A : Optional[Any] = init_layernorm_from_vision_encoder
if text_config is None:
A : Any = {}
logger.info('''`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.''' )
if vision_config is None:
A : List[str] = {}
logger.info('''`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.''' )
A : Union[str, Any] = BridgeTowerTextConfig(**SCREAMING_SNAKE_CASE )
A : Dict = BridgeTowerVisionConfig(**SCREAMING_SNAKE_CASE )
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : List[str] = copy.deepcopy(self.__dict__ )
A : List[str] = self.text_config.to_dict()
A : Tuple = self.vision_config.to_dict()
A : Any = self.__class__.model_type
return output
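# --- Editor's illustration (not part of the snippet above) ---------------------
# Sketch of composing the three configs above, using the upstream class and
# method names (BridgeTowerTextConfig, BridgeTowerVisionConfig,
# BridgeTowerConfig.from_text_vision_configs); in this dump all three classes
# are obfuscated to `A`, so the calls are left as comments.
# text_cfg = BridgeTowerTextConfig(vocab_size=50265)
# vision_cfg = BridgeTowerVisionConfig(image_size=288)
# cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
# cfg.to_dict()["text_config"]["vocab_size"]  # -> 50265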
| 3 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase : Union[str, Any] = logging.get_logger(__name__)
lowercase : str = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class A ( __snake_case ):
__magic_name__ = '''bert'''
def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A : Optional[int] = vocab_size
A : Optional[Any] = hidden_size
A : List[Any] = num_hidden_layers
A : List[str] = num_attention_heads
A : Dict = hidden_act
A : Optional[Any] = intermediate_size
A : List[Any] = hidden_dropout_prob
A : List[Any] = attention_probs_dropout_prob
A : Optional[Any] = max_position_embeddings
A : List[str] = type_vocab_size
A : Dict = initializer_range
A : str = layer_norm_eps
A : int = position_embedding_type
A : Dict = use_cache
A : str = classifier_dropout
class A ( __snake_case ):
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
A : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
A : Optional[int] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 3 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : List[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
a__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Any = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[Any] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
a__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 19 | 1 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
UpperCAmelCase_ : Tuple = Vector()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(lowercase_ ) , "(0,0,0,0,0,1)" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = Vector([1, 2, 3, 4] )
self.assertEqual(len(lowercase_ ) , 4 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = Vector([1, 2] )
UpperCAmelCase_ : Union[str, Any] = Vector([1, 2, 3, 4, 5] )
UpperCAmelCase_ : str = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
UpperCAmelCase_ : Optional[Any] = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_36 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_16 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_16 , 3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = Vector([1, 2, 3] )
UpperCAmelCase_ : Tuple = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = Vector([1, 2, 3] )
UpperCAmelCase_ : List[str] = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = Vector([1, 2, 3] )
UpperCAmelCase_ : int = Vector([2, -1, 4] ) # for test of dot product
UpperCAmelCase_ : int = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , "(3.0,6.0,9.0)" )
self.assertEqual((a * b) , 0 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertEqual(str(zero_vector(10 ) ).count("0" ) , 10 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , "(0,1,0)" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = Vector([1, 2, 3] )
UpperCAmelCase_ : Union[str, Any] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , lowercase_ , lowercase_ ) ) , "(3,4,7)" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = Vector([1, 0, 0, 0, 0, 0] )
UpperCAmelCase_ : Optional[Any] = x.copy()
self.assertEqual(str(lowercase_ ) , str(lowercase_ ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(lowercase_ ) , "(0,1,0)" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n" , str(lowercase_ ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
UpperCAmelCase_ : Tuple = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(lowercase_ , lowercase_ ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
UpperCAmelCase_ : Optional[Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(lowercase_ , lowercase_ ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
UpperCAmelCase_ : int = Vector([1, 2, 3] )
self.assertEqual("(14,32,50)" , str(a * x ) )
self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n" , str(a * 2 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n" , str(lowercase_ ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.01 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
UpperCAmelCase_ : str = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n" , str(a + b ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
UpperCAmelCase_ : List[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" , str(a - b ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertEqual(
"|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
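# --- Editor's illustration (not part of the snippet above) ---------------------
# The axpy helper exercised above is the BLAS-style update a*x + y. A minimal
# list-based sketch (the tested library's Vector type lives in `.lib` and is
# not shown here):
def axpy_sketch(a: float, x: list, y: list) -> list:
    assert len(x) == len(y), "vectors must have the same dimension"
    return [a * xi + yi for xi, yi in zip(x, y)]

assert axpy_sketch(2, [1, 2, 3], [1, 0, 1]) == [3, 4, 7]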
| 61 |
"""simple docstring"""
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
return round(float(moles / volume ) * nfactor )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
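# --- Editor's worked example (not part of the snippet above) -------------------
# The last three one-liners above rearrange the ideal gas law PV = nRT with
# R = 0.0821 L*atm/(mol*K), rounding the result to the nearest integer. With
# `pressure_of_gas_system` standing in for the second (obfuscated) definition:
#   n = 2 mol, T = 300 K, V = 2.44 L
print(round(2 * 0.0821 * 300 / 2.44))  # P = 20.18... -> 20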
| 61 | 1 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCAmelCase__ : Any = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __UpperCamelCase ( _UpperCAmelCase ):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
return max(metric_fn(_UpperCAmelCase, _UpperCAmelCase ) for gt in ground_truths )
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
__UpperCAmelCase : List[Any] = [line.strip() for line in open(_UpperCAmelCase, "r" ).readlines()]
__UpperCAmelCase : Union[str, Any] = []
if args.gold_data_mode == "qa":
__UpperCAmelCase : Union[str, Any] = pd.read_csv(_UpperCAmelCase, sep="\t", header=_UpperCAmelCase )
for answer_list in data[1]:
__UpperCAmelCase : Optional[Any] = ast.literal_eval(_UpperCAmelCase )
answers.append(_UpperCAmelCase )
else:
__UpperCAmelCase : str = [line.strip() for line in open(_UpperCAmelCase, "r" ).readlines()]
__UpperCAmelCase : Tuple = [[reference] for reference in references]
__UpperCAmelCase : List[str] = 0
for prediction, ground_truths in zip(_UpperCAmelCase, _UpperCAmelCase ):
total += 1
em += metric_max_over_ground_truths(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
fa += metric_max_over_ground_truths(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
__UpperCAmelCase : List[str] = 100.0 * em / total
__UpperCAmelCase : Optional[Any] = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
__UpperCAmelCase : Union[str, Any] = args.k
__UpperCAmelCase : str = [line.strip() for line in open(_UpperCAmelCase, "r" ).readlines()]
__UpperCAmelCase : List[Any] = [line.strip() for line in open(_UpperCAmelCase, "r" ).readlines()]
__UpperCAmelCase : Optional[Any] = 0
for hypo, reference in zip(_UpperCAmelCase, _UpperCAmelCase ):
__UpperCAmelCase : Dict = set(hypo.split("\t" )[:k] )
__UpperCAmelCase : Dict = set(reference.split("\t" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
__UpperCAmelCase : Optional[Any] = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
def strip_title(_UpperCAmelCase ):
if title.startswith("\"" ):
__UpperCAmelCase : List[str] = title[1:]
if title.endswith("\"" ):
__UpperCAmelCase : str = title[:-1]
return title
__UpperCAmelCase : Optional[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_UpperCAmelCase, return_tensors="pt", padding=_UpperCAmelCase, truncation=_UpperCAmelCase, )["input_ids"].to(args.device )
__UpperCAmelCase : int = rag_model.rag.question_encoder(_UpperCAmelCase )
__UpperCAmelCase : List[Any] = question_enc_outputs[0]
__UpperCAmelCase : str = rag_model.retriever(
        _UpperCAmelCase, question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
__UpperCAmelCase : Dict = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
__UpperCAmelCase : Union[str, Any] = []
for docs in all_docs:
__UpperCAmelCase : Optional[Any] = [strip_title(_UpperCAmelCase ) for title in docs["title"]]
provenance_strings.append("\t".join(_UpperCAmelCase ) )
return provenance_strings
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
with torch.no_grad():
__UpperCAmelCase : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_UpperCAmelCase, return_tensors="pt", padding=_UpperCAmelCase, truncation=_UpperCAmelCase )
__UpperCAmelCase : int = inputs_dict.input_ids.to(args.device )
__UpperCAmelCase : Optional[int] = inputs_dict.attention_mask.to(args.device )
__UpperCAmelCase : Union[str, Any] = rag_model.generate( # rag_model overwrites generate
_UpperCAmelCase, attention_mask=_UpperCAmelCase, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=_UpperCAmelCase, num_return_sequences=1, bad_words_ids=[[0, 0]], )
__UpperCAmelCase : Optional[int] = rag_model.retriever.generator_tokenizer.batch_decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
if args.print_predictions:
for q, a in zip(_UpperCAmelCase, _UpperCAmelCase ):
logger.info("Q: {} - A: {}".format(_UpperCAmelCase, _UpperCAmelCase ) )
return answers
def __UpperCamelCase ( ):
__UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument(
"--model_type", choices=["rag_sequence", "rag_token", "bart"], type=_UpperCAmelCase, help=(
"RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
" model_name_or_path"
), )
parser.add_argument(
"--index_name", default=_UpperCAmelCase, choices=["exact", "compressed", "legacy"], type=_UpperCAmelCase, help="RAG model retriever type", )
parser.add_argument(
"--index_path", default=_UpperCAmelCase, type=_UpperCAmelCase, help="Path to the retrieval index", )
parser.add_argument("--n_docs", default=5, type=_UpperCAmelCase, help="Number of retrieved docs" )
parser.add_argument(
"--model_name_or_path", default=_UpperCAmelCase, type=_UpperCAmelCase, required=_UpperCAmelCase, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", )
parser.add_argument(
"--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=_UpperCAmelCase, help=(
"Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
" precision@k."
), )
parser.add_argument("--k", default=1, type=_UpperCAmelCase, help="k for the precision@k calculation" )
parser.add_argument(
"--evaluation_set", default=_UpperCAmelCase, type=_UpperCAmelCase, required=_UpperCAmelCase, help="Path to a file containing evaluation samples", )
parser.add_argument(
"--gold_data_path", default=_UpperCAmelCase, type=_UpperCAmelCase, required=_UpperCAmelCase, help="Path to a tab-separated file with gold samples", )
parser.add_argument(
"--gold_data_mode", default="qa", type=_UpperCAmelCase, choices=["qa", "ans"], help=(
"Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string"
), )
parser.add_argument(
"--predictions_path", type=_UpperCAmelCase, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", )
parser.add_argument(
"--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", )
parser.add_argument(
"--eval_batch_size", default=8, type=_UpperCAmelCase, help="Batch size per GPU/CPU for evaluation.", )
parser.add_argument(
"--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", )
parser.add_argument(
"--num_beams", default=4, type=_UpperCAmelCase, help="Number of beams to be used when generating answers", )
parser.add_argument("--min_length", default=1, type=_UpperCAmelCase, help="Min length of the generated answers" )
parser.add_argument("--max_length", default=50, type=_UpperCAmelCase, help="Max length of the generated answers" )
parser.add_argument(
"--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", )
parser.add_argument(
"--print_docs", action="store_true", help="If True, prints docs retried while generating.", )
__UpperCAmelCase : Optional[Any] = parser.parse_args()
__UpperCAmelCase : Optional[int] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
return args
def __UpperCamelCase ( _UpperCAmelCase ):
__UpperCAmelCase : Dict = {}
if args.model_type is None:
__UpperCAmelCase : List[str] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("rag" ):
__UpperCAmelCase : Tuple = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
__UpperCAmelCase : Optional[int] = args.n_docs
if args.index_name is not None:
__UpperCAmelCase : str = args.index_name
if args.index_path is not None:
__UpperCAmelCase : List[Any] = args.index_path
else:
__UpperCAmelCase : int = BartForConditionalGeneration
__UpperCAmelCase : str = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("Evaluate the following checkpoints: %s", _UpperCAmelCase )
__UpperCAmelCase : List[Any] = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    __UpperCAmelCase : Dict = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
score_fn(_UpperCAmelCase, args.predictions_path, args.gold_data_path )
continue
logger.info("***** Running evaluation for {} *****".format(_UpperCAmelCase ) )
logger.info(" Batch size = %d", args.eval_batch_size )
logger.info(" Predictions will be stored under {}".format(args.predictions_path ) )
if args.model_type.startswith("rag" ):
__UpperCAmelCase : List[Any] = RagRetriever.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = model_class.from_pretrained(_UpperCAmelCase, retriever=_UpperCAmelCase, **_UpperCAmelCase )
model.retriever.init_retrieval()
else:
__UpperCAmelCase : Tuple = model_class.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
model.to(args.device )
with open(args.evaluation_set, "r" ) as eval_file, open(args.predictions_path, "w" ) as preds_file:
__UpperCAmelCase : str = []
for line in tqdm(_UpperCAmelCase ):
questions.append(line.strip() )
if len(_UpperCAmelCase ) == args.eval_batch_size:
__UpperCAmelCase : str = evaluate_batch_fn(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
preds_file.write("\n".join(_UpperCAmelCase ) + "\n" )
preds_file.flush()
__UpperCAmelCase : Optional[Any] = []
if len(_UpperCAmelCase ) > 0:
__UpperCAmelCase : Optional[Any] = evaluate_batch_fn(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
preds_file.write("\n".join(_UpperCAmelCase ) )
preds_file.flush()
score_fn(_UpperCAmelCase, args.predictions_path, args.gold_data_path )
if __name__ == "__main__":
lowerCAmelCase__ : Optional[int] = get_args()
main(args)
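# --- Editor's illustration (not part of the snippet above; hypothetical names) -
# The e2e scoring loop above takes, for each prediction, the maximum metric
# value over all reference answers (metric_max_over_ground_truths). A
# self-contained sketch with a toy exact-match metric:
def exact_match(prediction: str, reference: str) -> float:
    return float(prediction.strip().lower() == reference.strip().lower())

ground_truths = ["paris", "Paris, France"]
best = max(exact_match("Paris", gt) for gt in ground_truths)  # -> 1.0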
| 37 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ : Any = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( snake_case__ ,unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = PegasusTokenizer
SCREAMING_SNAKE_CASE = PegasusTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase : Tuple = PegasusTokenizer(UpperCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained("google/pegasus-large" )
def lowerCamelCase_ ( self : List[Any] , **UpperCAmelCase_ : List[str] ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowerCamelCase_ ( self : str , UpperCAmelCase_ : int ):
"""simple docstring"""
return ("This is a test", "This is a test")
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : List[str] = "</s>"
__UpperCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
__UpperCAmelCase : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "</s>" )
self.assertEqual(vocab_keys[-1] , "v" )
self.assertEqual(len(UpperCAmelCase_ ) , 1_103 )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_103 )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
__UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : Tuple = (
"Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
" </s> <pad> <pad> <pad>"
)
__UpperCAmelCase : List[str] = rust_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
__UpperCAmelCase : int = py_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
__UpperCAmelCase : Any = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
__UpperCAmelCase : Tuple = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
__UpperCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
__UpperCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
__UpperCAmelCase : Dict = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96_103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_024
__UpperCAmelCase : Tuple = "To ensure a smooth flow of bank resolutions."
__UpperCAmelCase : str = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
__UpperCAmelCase : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = ["This is going to be way too long." * 150, "short example"]
__UpperCAmelCase : Optional[int] = ["not super long but more than 5 tokens", "tiny"]
__UpperCAmelCase : str = self._large_tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
__UpperCAmelCase : Union[str, Any] = self._large_tokenizer(
text_target=UpperCAmelCase_ , max_length=5 , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 1_024)
assert batch.attention_mask.shape == (2, 1_024)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCAmelCase_ ) == 2 # input_ids, attention_mask.
@slow
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
# fmt: off
__UpperCAmelCase : Tuple = {"input_ids": [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( snake_case__ ,unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = PegasusTokenizer
SCREAMING_SNAKE_CASE = PegasusTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase : List[str] = PegasusTokenizer(UpperCAmelCase_ , offset=0 , mask_token_sent=UpperCAmelCase_ , mask_token="[MASK]" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" )
def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase_ : int ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowerCamelCase_ ( self : str , UpperCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
return ("This is a test", "This is a test")
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : List[str] = (
"Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
" <pad> <pad> <pad>"
)
__UpperCAmelCase : str = rust_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
__UpperCAmelCase : int = py_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_torch
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
__UpperCAmelCase : Any = ["This is going to be way too long." * 1_000, "short example"]
__UpperCAmelCase : List[Any] = ["not super long but more than 5 tokens", "tiny"]
__UpperCAmelCase : int = self._large_tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
__UpperCAmelCase : List[Any] = self._large_tokenizer(
text_target=UpperCAmelCase_ , max_length=5 , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 4_096)
assert batch.attention_mask.shape == (2, 4_096)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCAmelCase_ ) == 2 # input_ids, attention_mask.
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = (
"This is an example string that is used to test the original TF implementation against the HF"
" implementation"
)
__UpperCAmelCase : int = self._large_tokenizer(UpperCAmelCase_ ).input_ids
self.assertListEqual(
UpperCAmelCase_ , [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1] , )
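
# --- Added note (not part of the original tests): the id layout that the
# large-tokenizer asserts above rely on — pad=0, eos=1, then `offset` reserved
# ids, so unk sits at offset + 2. Plain arithmetic, no tokenizer required.
offset = 103
pad_token_id, eos_token_id = 0, 1
unk_token_id = offset + 2
assert unk_token_id == 105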
| 37 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
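
# --- Added example (not part of the original module): a hedged usage sketch of
# the helpers above. Only extract_path_from_uri and the None-filesystem case are
# exercised, so nothing on disk is touched.
assert extract_path_from_uri("s3://bucket/dataset") == "bucket/dataset"
assert extract_path_from_uri("/local/dataset") == "/local/dataset"
assert is_remote_filesystem(None) is False  # no filesystem object -> treated as local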
| 270 |
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self) -> None:
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
destroyAllWindows()
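
# --- Added example (not part of the original script): a quick check of the
# coordinate mapping used by NearestNeighbour, with no image required. The
# 512x... source size is hypothetical.
ratio_x = 512 / 800  # hypothetical src_w / dst_w
assert int(ratio_x * 0) == 0      # first destination column -> first source column
assert int(ratio_x * 799) == 511  # last destination column stays inside the source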
| 270 | 1 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
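
# --- Added example (not from the original tests): a hedged sketch of the
# behaviour `truncate_or_pad` is being tested for above — clip to `block_size`,
# then right-pad with `pad_token_id`. The real implementation may differ.
def truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    sequence = sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))


assert truncate_or_pad_sketch([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert truncate_or_pad_sketch(list(range(13)), 10, 0) == list(range(10))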
| 363 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
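
# --- Added example (not from the original module): the nested-config
# serialization pattern used by to_dict() above, shown with tiny stand-in
# classes so it runs without the transformers backbone configs.
class _TinyBackboneConfig:
    def __init__(self):
        self.depth = 50

    def to_dict(self):
        return dict(self.__dict__)


class _TinyConfig:
    model_type = "upernet"

    def __init__(self):
        self.backbone_config = _TinyBackboneConfig()
        self.hidden_size = 512

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


assert _TinyConfig().to_dict() == {"backbone_config": {"depth": 50}, "hidden_size": 512, "model_type": "upernet"}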
| 30 | 0 |
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
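
# --- Added sanity check (not part of the original script): the gcd divides
# both arguments, and the iterative and recursive implementations agree.
for a, b in [(48, 18), (7, 13), (100, 10)]:
    g = euclidean_gcd(a, b)
    assert a % g == 0 and b % g == 0
    assert g == euclidean_gcd_recursive(a, b)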
| 270 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval["%s_%s" % (prefix, k)] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
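
# --- Added example (not from the original script): a quick check of the
# normalization and token-overlap F1 logic defined above.
assert normalize_answer("The  Quick, Brown Fox!") == "quick brown fox"
assert compute_exact("a cat", "the cat") == 1  # articles are stripped before comparison
assert abs(compute_f1("the cat sat", "the cat") - 2 / 3) < 1e-9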
| 270 | 1 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        batch_size=1,
        num_inference_steps=100,
        eta=0.0,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
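
# --- Added example (not from the original module): the floor-to-multiple-of-32
# rule applied by preprocess() above, checked on plain integers.
w, h = 127, 65
w, h = (x - x % 32 for x in (w, h))
assert (w, h) == (96, 64)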
| 291 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
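
# --- Added example (not from the original module): the ancestor expansion used
# by _get_dirs() above — every parent of a file path except the root "." becomes
# a directory entry.
parents = [str(p) for p in list(PurePosixPath("data/train/part0.parquet").parents)[:-1]]
assert parents == ["data/train", "data"]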
| 291 | 1 |