| code (string, 82 – 53.2k chars) | code_codestyle (int64, 0 – 721) | style_context (string, 91 – 41.9k chars) | style_context_codestyle (int64, 0 – 699) | label (int64, 0 – 1) |
|---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
def lowercase__( __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ):
if len(__SCREAMING_SNAKE_CASE ) == 0:
return False
lowercase_ : Any = len(__SCREAMING_SNAKE_CASE ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , __SCREAMING_SNAKE_CASE )
else:
return binary_search(a_list[midpoint + 1 :] , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =input("Enter numbers separated by comma:\n").strip()
__SCREAMING_SNAKE_CASE =[int(item.strip()) for item in user_input.split(",")]
__SCREAMING_SNAKE_CASE =int(input("Enter the number to be found in the list:\n").strip())
__SCREAMING_SNAKE_CASE ="" if binary_search(sequence, target) else "not "
print(F"{target} was {not_str}found in {sequence}")
| 425
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCamelCase ( lowercase_ ):
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = None ,) -> int:
'''simple docstring'''
super().__init__()
self.register_modules(transformer=__UpperCamelCase ,vae=__UpperCamelCase ,scheduler=__UpperCamelCase )
# create a imagenet -> id dictionary for easier use
lowercase_ : Union[str, Any] = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(',' ):
lowercase_ : Dict = int(__UpperCamelCase )
lowercase_ : Dict = dict(sorted(self.labels.items() ) )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[int]:
'''simple docstring'''
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : Tuple = list(__UpperCamelCase )
for l in label:
if l not in self.labels:
raise ValueError(
f'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self ,__UpperCamelCase ,__UpperCamelCase = 4.0 ,__UpperCamelCase = None ,__UpperCamelCase = 50 ,__UpperCamelCase = "pil" ,__UpperCamelCase = True ,) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
lowercase_ : Dict = len(__UpperCamelCase )
lowercase_ : List[Any] = self.transformer.config.sample_size
lowercase_ : Optional[Any] = self.transformer.config.in_channels
lowercase_ : List[str] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) ,generator=__UpperCamelCase ,device=self.device ,dtype=self.transformer.dtype ,)
lowercase_ : Union[str, Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
lowercase_ : Union[str, Any] = torch.tensor(__UpperCamelCase ,device=self.device ).reshape(-1 )
lowercase_ : Dict = torch.tensor([1000] * batch_size ,device=self.device )
lowercase_ : Union[str, Any] = torch.cat([class_labels, class_null] ,0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(__UpperCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
lowercase_ : Tuple = latent_model_input[: len(__UpperCamelCase ) // 2]
lowercase_ : Union[str, Any] = torch.cat([half, half] ,dim=0 )
lowercase_ : Optional[int] = self.scheduler.scale_model_input(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : List[Any] = t
if not torch.is_tensor(__UpperCamelCase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
lowercase_ : Optional[int] = latent_model_input.device.type == 'mps'
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : Optional[Any] = torch.floataa if is_mps else torch.floataa
else:
lowercase_ : Optional[int] = torch.intaa if is_mps else torch.intaa
lowercase_ : Optional[int] = torch.tensor([timesteps] ,dtype=__UpperCamelCase ,device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
lowercase_ : str = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase_ : Dict = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
lowercase_ : Optional[Any] = self.transformer(
__UpperCamelCase ,timestep=__UpperCamelCase ,class_labels=__UpperCamelCase ).sample
# perform guidance
if guidance_scale > 1:
lowercase_ , lowercase_ : int = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
lowercase_ , lowercase_ : Optional[int] = torch.split(__UpperCamelCase ,len(__UpperCamelCase ) // 2 ,dim=0 )
lowercase_ : Any = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
lowercase_ : Any = torch.cat([half_eps, half_eps] ,dim=0 )
lowercase_ : int = torch.cat([eps, rest] ,dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
lowercase_ , lowercase_ : List[Any] = torch.split(__UpperCamelCase ,__UpperCamelCase ,dim=1 )
else:
lowercase_ : Union[str, Any] = noise_pred
# compute previous image: x_t -> x_t-1
lowercase_ : List[Any] = self.scheduler.step(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ).prev_sample
if guidance_scale > 1:
lowercase_ , lowercase_ : Union[str, Any] = latent_model_input.chunk(2 ,dim=0 )
else:
lowercase_ : Optional[int] = latent_model_input
lowercase_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents
lowercase_ : Optional[Any] = self.vae.decode(__UpperCamelCase ).sample
lowercase_ : str = (samples / 2 + 0.5).clamp(0 ,1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase_ : str = samples.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
lowercase_ : Tuple = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=__UpperCamelCase )
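A minimal usage sketch of this pipeline; the checkpoint name and label string are illustrative assumptions, not part of this file:

```python
import torch
from diffusers import DiTPipeline

# Assumed checkpoint name for illustration; any DiT checkpoint is used the same way.
pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

class_ids = pipe.get_label_ids(["golden retriever"])  # map label strings to ImageNet ids
image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]
image.save("dit_sample.png")
```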
| 425
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : int = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class __snake_case( __A ):
_A = '''deberta-v2'''
def __init__( self , A_=128_100 , A_=1_536 , A_=24 , A_=24 , A_=6_144 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=0 , A_=0.02 , A_=1e-7 , A_=False , A_=-1 , A_=0 , A_=True , A_=None , A_=0 , A_="gelu" , **A_ , ):
'''simple docstring'''
super().__init__(**A_ )
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = relative_attention
_SCREAMING_SNAKE_CASE = max_relative_positions
_SCREAMING_SNAKE_CASE = pad_token_id
_SCREAMING_SNAKE_CASE = position_biased_input
# Backwards compatibility
if type(A_ ) == str:
_SCREAMING_SNAKE_CASE = [x.strip() for x in pos_att_type.lower().split('''|''' )]
_SCREAMING_SNAKE_CASE = pos_att_type
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = kwargs.get('''pooler_hidden_size''' , A_ )
_SCREAMING_SNAKE_CASE = pooler_dropout
_SCREAMING_SNAKE_CASE = pooler_hidden_act
class __snake_case( __A ):
@property
def A ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
_SCREAMING_SNAKE_CASE = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_SCREAMING_SNAKE_CASE = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def A ( self ):
'''simple docstring'''
return 12
def A ( self , A_ , A_ = -1 , A_ = -1 , A_ = -1 , A_ = False , A_ = None , A_ = 3 , A_ = 40 , A_ = 40 , A_ = None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = super().generate_dummy_inputs(preprocessor=A_ , framework=A_ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
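A small sketch of how the configuration class is typically used; the size overrides are illustrative:

```python
from transformers import DebertaV2Config, DebertaV2Model

# Build a small config for experimentation; the defaults mirror deberta-v2-xlarge.
config = DebertaV2Config(hidden_size=384, num_hidden_layers=6, num_attention_heads=6, intermediate_size=1536)
model = DebertaV2Model(config)
print(model.config.model_type)  # "deberta-v2"
```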
| 717
|
"""simple docstring"""
def A__ ( UpperCamelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = int(UpperCamelCase__ )
if decimal in (0, 1): # Exit cases for the recursion
return str(UpperCamelCase__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = divmod(UpperCamelCase__ , 2 )
return binary_recursive(UpperCamelCase__ ) + str(UpperCamelCase__ )
def A__ ( UpperCamelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = str(UpperCamelCase__ ).strip()
if not number:
raise ValueError('''No input value was provided''' )
_SCREAMING_SNAKE_CASE = '''-''' if number.startswith('''-''' ) else ''''''
_SCREAMING_SNAKE_CASE = number.lstrip('''-''' )
if not number.isnumeric():
raise ValueError('''Input value is not an integer''' )
return F'''{negative}0b{binary_recursive(int(UpperCamelCase__ ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
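For a quick sanity check, the output format matches Python's built-in `bin()` (sample values are illustrative):

```python
print(main("21"))  # 0b10101, same digits as bin(21)
print(main("-5"))  # -0b101, matches bin(-5)
```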
| 168
| 0
|
'''simple docstring'''
import unittest

import numpy as np


def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None):
    '''simple docstring'''
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        """simple docstring"""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        """simple docstring"""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        # Swapping the first two arguments produces incompatible shapes,
        # which must trigger the validation above.
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        """simple docstring"""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
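The first test relies on the standard block-determinant identity for the Schur complement, in the notation of the function above:

$$\det\begin{pmatrix} A & B \\ B^{\mathsf{T}} & C \end{pmatrix} = \det(A)\,\det\bigl(C - B^{\mathsf{T}} A^{-1} B\bigr)$$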
| 634
|
'''simple docstring'''
from random import randint, random


def construct_highway(
    number_of_cells,
    frequency,
    initial_speed,
    random_frequency=False,
    random_speed=False,
    max_speed=5,
):
    '''simple docstring'''
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now, car_index):
    '''simple docstring'''
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now, probability, max_speed):
    '''simple docstring'''
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway, number_of_update, probability, max_speed):
    '''simple docstring'''
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
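A short usage sketch of the simulation functions above; the parameter values are illustrative:

```python
# Build a 100-cell circular highway with a car every 10 cells, initial speed 3.
highway = construct_highway(100, frequency=10, initial_speed=3)

# Run 50 update steps; each step a driver slows down with probability 0.1.
history = simulate(highway, number_of_update=50, probability=0.1, max_speed=5)
print(len(history))  # 51 rows: the initial state plus one per update
```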
| 634
| 1
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    '''simple docstring'''
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    '''simple docstring'''
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    '''simple docstring'''
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    '''simple docstring'''
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    '''simple docstring'''
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
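A minimal sketch of decoding a local file with `ffmpeg_read`; the path is an illustrative assumption and the `ffmpeg` binary must be on `PATH`:

```python
with open("sample.wav", "rb") as f:
    payload = f.read()

# Decode to mono float32 PCM at 16 kHz.
audio = ffmpeg_read(payload, sampling_rate=16000)
print(audio.dtype, audio.shape)  # float32, (num_samples,)
```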
| 718
|
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPT2Config,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
T5Config,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPT2LMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFT5ForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWav2Vec2Model,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
Wav2Vec2Config,
Wav2Vec2Model,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tf2_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPT2LMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
T5ForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPT2Config,
TFGPT2LMHeadModel,
GPT2LMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
T5Config,
TFT5ForConditionalGeneration,
T5ForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
Wav2Vec2Config,
TFWav2Vec2Model,
Wav2Vec2Model,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    '''simple docstring'''
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    '''simple docstring'''
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
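An illustrative invocation; the script filename is an assumption, and the flags are the ones defined by the argparse block above:

```bash
# Convert the BERT checkpoints and compare TF outputs against PyTorch.
python convert_pytorch_checkpoint_to_tf2.py \
    --tf_dump_path ./tf_models \
    --model_type bert \
    --compare_with_pt_model
```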
| 496
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
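With this pattern the heavy submodules are imported only on first attribute access; a minimal sketch of the effect (the module path assumes this file lives at `transformers/models/mvp/__init__.py`):

```python
import transformers.models.mvp as mvp

# Nothing from tokenization_mvp has been imported yet; _LazyModule resolves
# the attribute on first access and triggers the real import then.
tokenizer_cls = mvp.MvpTokenizer
```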
| 210
|
'''simple docstring'''
from __future__ import annotations

import math

__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    '''simple docstring'''
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    '''simple docstring'''
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
| 210
| 1
|
'''simple docstring'''
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    '''simple docstring'''
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_lowerCAmelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
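An illustrative invocation; the script filename and paths are assumptions, and the flags come from the argparse block above:

```bash
python convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py \
    --checkpoint_path ./wav2vec2_conformer.pt \
    --pytorch_dump_folder_path ./hf_model \
    --config_path ./config.json
```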
| 245
|
'''simple docstring'''
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
| 245
| 1
|
def solution(length: int = 50) -> int:
    """simple docstring"""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
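Per tile length the count also satisfies a one-dimensional recurrence; the triple loop above computes the same totals quadratically. One standard equivalent formulation, for tile length $t \in \{2, 3, 4\}$:

$$g_t(n) = g_t(n-1) + g_t(n-t), \qquad g_t(n) = 1 \ \text{for}\ 0 \le n < t,$$

with the returned sum equal to $\sum_{t=2}^{4}\bigl(g_t(\text{length}) - 1\bigr)$, subtracting the all-black row once per tile length.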
| 641
|
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( __snake_case ):
"""simple docstring"""
lowerCAmelCase = (IPNDMScheduler,)
lowerCAmelCase = (('num_inference_steps', 5_0),)
def _UpperCAmelCase ( self , **a__ ) -> List[str]:
A = {"""num_train_timesteps""": 1000}
config.update(**a__ )
return config
def _UpperCAmelCase ( self , a__=0 , **a__ ) -> Tuple:
A = dict(self.forward_default_kwargs )
A = kwargs.pop("""num_inference_steps""" , a__ )
A = self.dummy_sample
A = 0.1 * sample
A = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A = self.get_scheduler_config(**a__ )
A = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals
A = dummy_past_residuals[:]
if time_step is None:
A = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
A = scheduler_class.from_pretrained(a__ )
new_scheduler.set_timesteps(a__ )
# copy over dummy past residuals
A = dummy_past_residuals[:]
A = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
A = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
A = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self , a__=0 , **a__ ) -> int:
A = dict(self.forward_default_kwargs )
A = kwargs.pop("""num_inference_steps""" , a__ )
A = self.dummy_sample
A = 0.1 * sample
A = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A = self.get_scheduler_config()
A = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals (must be after setting timesteps)
A = dummy_past_residuals[:]
if time_step is None:
A = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
A = scheduler_class.from_pretrained(a__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a__ )
# copy over dummy past residual (must be after setting timesteps)
A = dummy_past_residuals[:]
A = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
A = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
A = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCAmelCase ( self , **a__ ) -> List[Any]:
A = self.scheduler_classes[0]
A = self.get_scheduler_config(**a__ )
A = scheduler_class(**a__ )
A = 10
A = self.dummy_model()
A = self.dummy_sample_deter
scheduler.set_timesteps(a__ )
for i, t in enumerate(scheduler.timesteps ):
A = model(a__ , a__ )
A = scheduler.step(a__ , a__ , a__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
A = model(a__ , a__ )
A = scheduler.step(a__ , a__ , a__ ).prev_sample
return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
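
# Hedged usage sketch (added for illustration, not part of the original test file).
# It only uses the IPNDMScheduler API exercised above: set_timesteps() and step().
def _example_denoising_loop(model, sample, num_inference_steps=50):
    scheduler = IPNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps)
    for t in scheduler.timesteps:
        residual = model(sample, t)  # predicted noise / residual at step t
        sample = scheduler.step(residual, t, sample).prev_sample
    return sample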
| 641
| 1
|
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
        eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None,
    head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
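
# Hedged usage sketch (added for illustration; mirrors the integration test above).
# Loading the checkpoint requires network access, so the snippet is kept commented out:
# tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
# model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill")
# inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
# reply_ids = model.generate(inputs.input_ids)
# print(tokenizer.batch_decode(reply_ids.numpy(), skip_special_tokens=True)[0])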
| 710
|
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
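
# Hedged CLI sketch (added for illustration; the file name, paths, and model name below are hypothetical):
# python convert_s3prl_checkpoint.py \
#     --base_model_name facebook/wav2vec2-base \
#     --config_path ./config.json \
#     --checkpoint_path ./s3prl_checkpoint.ckpt \
#     --model_dump_path ./converted_model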
| 236
| 0
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 199
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
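
# Note (added for illustration): with this lazy-module pattern, importing the package
# only builds the `_LazyModule` shim; e.g. `DeiTModel` is actually imported from
# `modeling_deit` the first time the attribute is accessed on the module.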
| 199
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self, sample_size=65536, sample_rate=None, in_channels=2, out_channels=2, extra_in_channels=0,
        time_embedding_type="fourier", flip_sin_to_cos=True, use_timestep_embedding=False, freq_shift=0.0,
        down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type="UNetMidBlock1D", out_block_type=None, block_out_channels=(32, 32, 64), act_fn=None,
        norm_num_groups=8, layers_per_block=1, downsample_each_block=False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel,
                out_channels=output_channel, temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1], embed_dim=block_out_channels[0],
            num_layers=layers_per_block, add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel,
                out_channels=output_channel, temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0],
            out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4,
        )
    def forward(self, sample, timestep, return_dict=True):
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
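
# Hedged usage sketch (added for illustration; shapes assume the default config above):
def _example_unet1d_forward():
    model = UNet1DModel(sample_size=64, in_channels=2, out_channels=2)
    sample = torch.randn(1, 2, 64)  # (batch, channels, sample_size)
    timestep = 10
    return model(sample, timestep).sample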
| 87
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
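
# Hedged usage sketch (added for illustration; requires a GPU and network access):
# pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b").to("cuda")
# video_frames = pipe("Spiderman is surfing", num_inference_steps=25, output_type="pt").frames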
| 87
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
        eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None,
    head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
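
# Hedged usage sketch (added for illustration; mirrors translate_src_text above):
# tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
# model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")
# batch = tokenizer([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
# ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
# print(tokenizer.batch_decode(ids, skip_special_tokens=True))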
| 334
|
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 334
| 1
|
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    # time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures
def end_measure(start_measures):
    # time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures
def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
| 718
|
'''simple docstring'''
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
'compression_format, is_archive' ,[
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] ,)
def test_base_extractors(
    compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file, tar_file, xz_file,
    zip_file, zstd_file, tmp_path, text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
        extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'compression_format, is_archive' ,[
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] ,)
def test_extractor(
    compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file, tar_file, xz_file,
    zip_file, zstd_file, tmp_path, text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
        extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'insecure_tar_file, error_log' ,[('tar_file_with_dot_dot', 'illegal path'), ('tar_file_with_sym_link', 'Symlink')] ,)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    path = insecure_tar_files[insecure_tar_file]
    extraction_path = tmp_path / "extracted"
    TarExtractor.extract(path, extraction_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
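
# Hedged usage sketch (added for illustration; "archive.tar" and "out_dir" are hypothetical paths):
# fmt = Extractor.infer_extractor_format("archive.tar")  # e.g. -> "tar"
# Extractor.extract("archive.tar", "out_dir", fmt)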
| 124
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1,
        attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1,
        layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group",
        feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False,
        num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False,
        apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
        num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1,
        num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1,
        ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False,
        classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1,
        eos_token_id=2, num_clusters=504, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
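
# Worked example (added for illustration): with the default conv_stride
# (5, 2, 2, 2, 2, 2, 2), inputs_to_logits_ratio is 5 * 2**6 = 320, i.e. one
# logit frame per 320 input samples (~20 ms of 16 kHz audio).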
| 221
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 169
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ = logging.get_logger(__name__)
A__ = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class a ( PretrainedConfig ):
__lowerCAmelCase : Optional[int] = """blip_2_vision_model"""
def __init__( self :Union[str, Any] ,__lowercase :str=1_4_0_8 ,__lowercase :Optional[Any]=6_1_4_4 ,__lowercase :Union[str, Any]=3_9 ,__lowercase :List[str]=1_6 ,__lowercase :List[str]=2_2_4 ,__lowercase :int=1_4 ,__lowercase :str="gelu" ,__lowercase :int=0.0_0001 ,__lowercase :int=0.0 ,__lowercase :Optional[Any]=1e-1_0 ,__lowercase :Tuple=True ,**__lowercase :List[Any] ,):
super().__init__(**__lowercase )
snake_case__ : Dict = hidden_size
snake_case__ : int = intermediate_size
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : List[str] = patch_size
snake_case__ : List[Any] = image_size
snake_case__ : Optional[Any] = initializer_range
snake_case__ : Optional[Any] = attention_dropout
snake_case__ : Any = layer_norm_eps
snake_case__ : Optional[Any] = hidden_act
snake_case__ : Any = qkv_bias
@classmethod
def __lowerCamelCase ( cls :Dict ,__lowercase :Union[str, os.PathLike] ,**__lowercase :Any ):
cls._set_token_in_kwargs(__lowercase )
snake_case__ , snake_case__ : Optional[int] = cls.get_config_dict(__lowercase ,**__lowercase )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
snake_case__ : int = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__lowercase ,**__lowercase )
class a ( PretrainedConfig ):
__lowerCAmelCase : Any = """blip_2_qformer"""
def __init__( self :Optional[int] ,__lowercase :List[Any]=3_0_5_2_2 ,__lowercase :Tuple=7_6_8 ,__lowercase :Optional[int]=1_2 ,__lowercase :int=1_2 ,__lowercase :Union[str, Any]=3_0_7_2 ,__lowercase :List[Any]="gelu" ,__lowercase :Optional[int]=0.1 ,__lowercase :Tuple=0.1 ,__lowercase :Dict=5_1_2 ,__lowercase :Tuple=0.02 ,__lowercase :int=1e-1_2 ,__lowercase :Tuple=0 ,__lowercase :Any="absolute" ,__lowercase :Optional[int]=2 ,__lowercase :Dict=1_4_0_8 ,**__lowercase :List[str] ,):
super().__init__(pad_token_id=__lowercase ,**__lowercase )
snake_case__ : int = vocab_size
snake_case__ : List[Any] = hidden_size
snake_case__ : str = num_hidden_layers
snake_case__ : Tuple = num_attention_heads
snake_case__ : str = hidden_act
snake_case__ : Union[str, Any] = intermediate_size
snake_case__ : int = hidden_dropout_prob
snake_case__ : Any = attention_probs_dropout_prob
snake_case__ : Tuple = max_position_embeddings
snake_case__ : Any = initializer_range
snake_case__ : Optional[int] = layer_norm_eps
snake_case__ : Any = position_embedding_type
snake_case__ : Optional[Any] = cross_attention_frequency
snake_case__ : Optional[Any] = encoder_hidden_size
@classmethod
def __lowerCamelCase ( cls :str ,__lowercase :Union[str, os.PathLike] ,**__lowercase :Optional[Any] ):
cls._set_token_in_kwargs(__lowercase )
snake_case__ , snake_case__ : str = cls.get_config_dict(__lowercase ,**__lowercase )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
snake_case__ : Optional[int] = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__lowercase ,**__lowercase )
class a ( PretrainedConfig ):
__lowerCAmelCase : List[str] = """blip-2"""
__lowerCAmelCase : List[str] = True
def __init__( self :Dict ,__lowercase :Any=None ,__lowercase :str=None ,__lowercase :List[Any]=None ,__lowercase :str=3_2 ,**__lowercase :Any ):
super().__init__(**__lowercase )
if vision_config is None:
snake_case__ : Optional[int] = {}
            logger.info('''vision_config is None. Initializing the Blip2VisionConfig with default values.''' )
if qformer_config is None:
snake_case__ : int = {}
logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' )
if text_config is None:
snake_case__ : Dict = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
snake_case__ : Any = BlipaVisionConfig(**__lowercase )
snake_case__ : int = BlipaQFormerConfig(**__lowercase )
snake_case__ : List[str] = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
snake_case__ : Any = CONFIG_MAPPING[text_model_type](**__lowercase )
snake_case__ : Tuple = self.text_config.tie_word_embeddings
snake_case__ : Dict = self.text_config.is_encoder_decoder
snake_case__ : int = num_query_tokens
snake_case__ : str = self.vision_config.hidden_size
snake_case__ : Tuple = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
snake_case__ : Dict = 1.0
snake_case__ : str = 0.02
@classmethod
def __lowerCamelCase ( cls :Dict ,__lowercase :BlipaVisionConfig ,__lowercase :BlipaQFormerConfig ,__lowercase :PretrainedConfig ,**__lowercase :int ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**__lowercase ,)
def __lowerCamelCase ( self :Dict ):
snake_case__ : Union[str, Any] = copy.deepcopy(self.__dict__ )
snake_case__ : str = self.vision_config.to_dict()
snake_case__ : Tuple = self.qformer_config.to_dict()
snake_case__ : str = self.text_config.to_dict()
snake_case__ : Union[str, Any] = self.__class__.model_type
return output
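# A self-contained sketch of the composite-config pattern above: each
# sub-config serializes itself, and the parent's to_dict() nests the
# sub-dicts under named keys so the whole config round-trips. All names
# and values here are illustrative assumptions, not the classes above.
import copy

class SubConfigSketch:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def to_dict(self):
        return copy.deepcopy(self.__dict__)

class CompositeConfigSketch:
    def __init__(self, vision_config=None, qformer_config=None):
        self.vision_config = SubConfigSketch(**(vision_config or {}))
        self.qformer_config = SubConfigSketch(**(qformer_config or {}))

    def to_dict(self):
        # nest each sub-config under its own key, as the method above does
        return {
            "vision_config": self.vision_config.to_dict(),
            "qformer_config": self.qformer_config.to_dict(),
        }

cfg_sketch = CompositeConfigSketch(vision_config={"hidden_size": 1408})
assert cfg_sketch.to_dict()["vision_config"]["hidden_size"] == 1408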
| 219
|
import unittest
from transformers import DonutProcessor
A__ = '''naver-clova-ix/donut-base'''
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : str = DonutProcessor.from_pretrained(__lowercase )
def __lowerCamelCase ( self :int ):
snake_case__ : List[Any] = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
snake_case__ : List[Any] = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
snake_case__ : Any = self.processor.tokenajson(__lowercase )
self.assertDictEqual(__lowercase ,__lowercase )
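# A rough, illustrative sketch of the tag-token-to-JSON idea the test
# above exercises: "<s_key>value</s_key>" spans map to {"key": "value"}.
# This regex version handles only flat, non-nested tags and is an
# assumption for illustration, not the processor's actual implementation.
import re

def flat_token_to_json_sketch(sequence):
    return dict(re.findall(r"<s_(\w+)>(.*?)</s_\1>", sequence))

assert flat_token_to_json_sketch("<s_name>John Doe</s_name><s_age>99</s_age>") == {
    "name": "John Doe",
    "age": "99",
}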
| 219
| 1
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowercase (ProcessorMixin ):
"""simple docstring"""
_UpperCAmelCase = ["""image_processor""", """tokenizer"""]
_UpperCAmelCase = """ViTImageProcessor"""
_UpperCAmelCase = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop('feature_extractor' )
SCREAMING_SNAKE_CASE_ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
def __call__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ):
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError('You have to specify either text, visual prompt or images.' )
if text is not None and visual_prompt is not None:
raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' )
if text is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if visual_prompt is not None:
SCREAMING_SNAKE_CASE_ : int = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if images is not None:
SCREAMING_SNAKE_CASE_ : Dict = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if visual_prompt is not None and images is not None:
SCREAMING_SNAKE_CASE_ : List[str] = {
'pixel_values': image_features.pixel_values,
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
SCREAMING_SNAKE_CASE_ : Tuple = {
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase__ ) , tensor_type=lowerCAmelCase__ )
def UpperCamelCase__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def UpperCamelCase__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowerCAmelCase__ , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowerCAmelCase__ , )
return self.image_processor
| 101
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase=0.9_9_9 , _UpperCAmelCase="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(_UpperCAmelCase ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_UpperCAmelCase ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
lowerCamelCase_: Union[str, Any] = []
for i in range(_UpperCAmelCase ):
lowerCamelCase_: Tuple = i / num_diffusion_timesteps
lowerCamelCase_: Optional[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_UpperCAmelCase ) / alpha_bar_fn(_UpperCAmelCase ) , _UpperCAmelCase ) )
return torch.tensor(_UpperCAmelCase , dtype=torch.floataa )
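# A worked, self-contained instance of the cosine schedule above:
# beta_i = min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta),
# so the betas grow toward the end of the schedule and are capped at
# max_beta. T and max_beta below are illustrative values.
import math

def alpha_bar_sketch(t):
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

T, max_beta = 10, 0.999
betas_sketch = [
    min(1 - alpha_bar_sketch((i + 1) / T) / alpha_bar_sketch(i / T), max_beta)
    for i in range(T)
]
assert 0 < betas_sketch[0] < betas_sketch[-1] <= max_beta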
class a__ ( SchedulerMixin , ConfigMixin ):
_A = [e.name for e in KarrasDiffusionSchedulers]
_A = 2
@register_to_config
def __init__( self : int , A_ : int = 10_00 , A_ : float = 0.00085 , A_ : float = 0.012 , A_ : str = "linear" , A_ : Optional[Union[np.ndarray, List[float]]] = None , A_ : str = "epsilon" , A_ : str = "linspace" , A_ : int = 0 , ) -> Any:
"""simple docstring"""
if trained_betas is not None:
lowerCamelCase_: Dict = torch.tensor(A_ , dtype=torch.floataa )
elif beta_schedule == "linear":
lowerCamelCase_: Tuple = torch.linspace(A_ , A_ , A_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCamelCase_: List[Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , A_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCamelCase_: Tuple = betas_for_alpha_bar(A_ )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
lowerCamelCase_: str = 1.0 - self.betas
lowerCamelCase_: Any = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(A_ , A_ , A_ )
def lowerCAmelCase ( self : List[Any] , A_ : str , A_ : Union[str, Any]=None ) -> int:
"""simple docstring"""
if schedule_timesteps is None:
lowerCamelCase_: Union[str, Any] = self.timesteps
lowerCamelCase_: Tuple = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowerCamelCase_: List[str] = 1 if len(A_ ) > 1 else 0
else:
lowerCamelCase_: int = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep
lowerCamelCase_: List[Any] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCAmelCase ( self : int , A_ : torch.FloatTensor , A_ : Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
"""simple docstring"""
lowerCamelCase_: List[Any] = self.index_for_timestep(A_ )
if self.state_in_first_order:
lowerCamelCase_: List[Any] = self.sigmas[step_index]
else:
lowerCamelCase_: Optional[Any] = self.sigmas_interpol[step_index]
lowerCamelCase_: Optional[int] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCAmelCase ( self : Any , A_ : int , A_ : Union[str, torch.device] = None , A_ : Optional[int] = None , ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_: List[str] = num_inference_steps
lowerCamelCase_: int = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowerCamelCase_: int = np.linspace(0 , num_train_timesteps - 1 , A_ , dtype=A_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowerCamelCase_: Dict = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCamelCase_: List[Any] = (np.arange(0 , A_ ) * step_ratio).round()[::-1].copy().astype(A_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowerCamelCase_: Optional[int] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCamelCase_: Any = (np.arange(A_ , 0 , -step_ratio )).round().copy().astype(A_ )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
lowerCamelCase_: Dict = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowerCamelCase_: Any = torch.from_numpy(np.log(A_ ) ).to(A_ )
lowerCamelCase_: Union[str, Any] = np.interp(A_ , np.arange(0 , len(A_ ) ) , A_ )
lowerCamelCase_: List[str] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
lowerCamelCase_: Tuple = torch.from_numpy(A_ ).to(device=A_ )
# interpolate sigmas
lowerCamelCase_: List[Any] = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
lowerCamelCase_: List[Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
lowerCamelCase_: int = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(A_ ).startswith("""mps""" ):
# mps does not support float64
lowerCamelCase_: List[Any] = torch.from_numpy(A_ ).to(A_ , dtype=torch.floataa )
else:
lowerCamelCase_: Union[str, Any] = torch.from_numpy(A_ ).to(A_ )
# interpolate timesteps
lowerCamelCase_: Optional[int] = self.sigma_to_t(A_ ).to(A_ , dtype=timesteps.dtype )
lowerCamelCase_: Optional[int] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
lowerCamelCase_: str = torch.cat([timesteps[:1], interleaved_timesteps] )
lowerCamelCase_: Tuple = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowerCamelCase_: List[str] = defaultdict(A_ )
def lowerCAmelCase ( self : Tuple , A_ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
# get log sigma
lowerCamelCase_: List[Any] = sigma.log()
# get distribution
lowerCamelCase_: Union[str, Any] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
lowerCamelCase_: Any = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
lowerCamelCase_: int = low_idx + 1
lowerCamelCase_: Optional[Any] = self.log_sigmas[low_idx]
lowerCamelCase_: Dict = self.log_sigmas[high_idx]
# interpolate sigmas
lowerCamelCase_: int = (low - log_sigma) / (low - high)
lowerCamelCase_: Optional[int] = w.clamp(0 , 1 )
# transform interpolation to time range
lowerCamelCase_: Any = (1 - w) * low_idx + w * high_idx
lowerCamelCase_: Dict = t.view(sigma.shape )
return t
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.sample is None
def lowerCAmelCase ( self : List[Any] , A_ : Union[torch.FloatTensor, np.ndarray] , A_ : Union[float, torch.FloatTensor] , A_ : Union[torch.FloatTensor, np.ndarray] , A_ : bool = True , ) -> Union[SchedulerOutput, Tuple]:
"""simple docstring"""
lowerCamelCase_: int = self.index_for_timestep(A_ )
# advance index counter by 1
lowerCamelCase_: List[str] = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowerCamelCase_: List[str] = self.sigmas[step_index]
lowerCamelCase_: int = self.sigmas_interpol[step_index + 1]
lowerCamelCase_: int = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
lowerCamelCase_: Union[str, Any] = self.sigmas[step_index - 1]
lowerCamelCase_: List[Any] = self.sigmas_interpol[step_index]
lowerCamelCase_: str = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowerCamelCase_: List[str] = 0
lowerCamelCase_: int = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowerCamelCase_: str = sigma_hat if self.state_in_first_order else sigma_interpol
lowerCamelCase_: int = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowerCamelCase_: Tuple = sigma_hat if self.state_in_first_order else sigma_interpol
lowerCamelCase_: Union[str, Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowerCamelCase_: Optional[int] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowerCamelCase_: Any = sigma_interpol - sigma_hat
# store for 2nd order step
lowerCamelCase_: str = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
lowerCamelCase_: Dict = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
lowerCamelCase_: List[Any] = sigma_next - sigma_hat
lowerCamelCase_: str = self.sample
lowerCamelCase_: Tuple = None
lowerCamelCase_: str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A_ )
def lowerCAmelCase ( self : Tuple , A_ : torch.FloatTensor , A_ : torch.FloatTensor , A_ : torch.FloatTensor , ) -> torch.FloatTensor:
"""simple docstring"""
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowerCamelCase_: List[str] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(A_ ):
# mps does not support float64
lowerCamelCase_: int = self.timesteps.to(original_samples.device , dtype=torch.floataa )
lowerCamelCase_: Dict = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
lowerCamelCase_: int = self.timesteps.to(original_samples.device )
lowerCamelCase_: Dict = timesteps.to(original_samples.device )
lowerCamelCase_: Optional[Any] = [self.index_for_timestep(A_ , A_ ) for t in timesteps]
lowerCamelCase_: Tuple = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowerCamelCase_: List[Any] = sigma.unsqueeze(-1 )
lowerCamelCase_: List[Any] = original_samples + noise * sigma
return noisy_samples
def __len__( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return self.config.num_train_timesteps
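# A small, self-contained sketch of the sigma-broadcast noising used by
# the add_noise-style method above: sigma gains trailing singleton dims
# until it broadcasts over the batched sample. Shapes are illustrative.
import torch

sample_sketch = torch.zeros(2, 3, 8, 8)
noise_sketch = torch.randn(2, 3, 8, 8)
sigma_sketch = torch.tensor([0.5, 1.0])
while len(sigma_sketch.shape) < len(sample_sketch.shape):
    sigma_sketch = sigma_sketch.unsqueeze(-1)  # -> shape (2, 1, 1, 1)
noisy_sketch = sample_sketch + noise_sketch * sigma_sketch
assert noisy_sketch.shape == sample_sketch.shape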
| 423
| 0
|
from math import sqrt
def A ( _UpperCAmelCase : Optional[int] ) -> bool:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' must been an int and positive"
_UpperCAmelCase = True
# 0 and 1 are none primes.
if number <= 1:
_UpperCAmelCase = False
for divisor in range(2 , int(round(sqrt(__snake_case ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
_UpperCAmelCase = False
break
# precondition
assert isinstance(__snake_case , __snake_case ), "'status' must been from type bool"
return status
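# Quick expected behavior for the trial-division test above (names as used
# at the call sites further down; illustrative, since the defs are masked):
#   is_prime(2) -> True, is_prime(97) -> True
#   is_prime(1) -> False, is_prime(91) -> False  (91 = 7 * 13)
# Divisors are only tried up to floor(sqrt(number)), so 97 needs 2..9 only.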
def A ( _UpperCAmelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
_UpperCAmelCase = list(range(2 , n + 1 ) )
_UpperCAmelCase = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(__snake_case ) ):
for j in range(i + 1 , len(__snake_case ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
_UpperCAmelCase = 0
# filters actual prime numbers.
_UpperCAmelCase = [x for x in begin_list if x != 0]
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type list"
return ans
def A ( _UpperCAmelCase : Optional[Any] ) -> List[str]:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (n > 2), "'N' must been an int and > 2"
_UpperCAmelCase = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(__snake_case ):
ans.append(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type list"
return ans
def A ( _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and number >= 0, "'number' must been an int and >= 0"
_UpperCAmelCase = [] # this list will be returns of the function.
# potential prime number factors.
_UpperCAmelCase = 2
_UpperCAmelCase = number
if number == 0 or number == 1:
ans.append(__snake_case )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(__snake_case ):
while quotient != 1:
if is_prime(__snake_case ) and (quotient % factor == 0):
ans.append(__snake_case )
quotient /= factor
else:
factor += 1
else:
ans.append(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type list"
return ans
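# Worked trace for the factorization above with number = 100 (illustrative):
#   factor 2: 100 -> 50.0 -> 25.0   (appends 2, 2)
#   factor 5: 25.0 -> 5.0 -> 1.0    (appends 5, 5)
#   result: [2, 2, 5, 5]
# Note that `quotient /= factor` is true division, so quotient becomes a float.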
def A ( _UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' bust been an int and >= 0"
_UpperCAmelCase = 0
# prime factorization of 'number'
_UpperCAmelCase = prime_factorization(__snake_case )
_UpperCAmelCase = max(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type int"
return ans
def A ( _UpperCAmelCase : str ) -> int:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' bust been an int and >= 0"
_UpperCAmelCase = 0
# prime factorization of 'number'
_UpperCAmelCase = prime_factorization(__snake_case )
_UpperCAmelCase = min(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type int"
return ans
def A ( _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
    '''simple docstring'''
    assert isinstance(__snake_case , __snake_case ), "'number' must be an int"
    assert isinstance(number % 2 == 0 , __snake_case ), "comparison must be of type bool"
    return number % 2 == 0
def A ( _UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
    '''simple docstring'''
    assert isinstance(__snake_case , __snake_case ), "'number' must be an int"
    assert isinstance(number % 2 != 0 , __snake_case ), "comparison must be of type bool"
    return number % 2 != 0
def A ( _UpperCAmelCase : int ) -> str:
'''simple docstring'''
assert (
isinstance(__snake_case , __snake_case ) and (number > 2) and is_even(__snake_case )
), "'number' must been an int, even and > 2"
_UpperCAmelCase = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
_UpperCAmelCase = get_prime_numbers(__snake_case )
_UpperCAmelCase = len(__snake_case )
# run variable for while-loops.
_UpperCAmelCase = 0
_UpperCAmelCase = None
    # exit variable, used to break out of the loops
_UpperCAmelCase = True
while i < len_pn and loop:
_UpperCAmelCase = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
_UpperCAmelCase = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(__snake_case , __snake_case )
and (len(__snake_case ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def A ( _UpperCAmelCase : int , _UpperCAmelCase : List[str] ) -> str:
'''simple docstring'''
assert (
isinstance(__snake_case , __snake_case )
and isinstance(__snake_case , __snake_case )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
_UpperCAmelCase = 0
while numbera != 0:
_UpperCAmelCase = numbera % numbera
_UpperCAmelCase = numbera
_UpperCAmelCase = rest
# precondition
assert isinstance(__snake_case , __snake_case ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
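# The masked loop above is Euclid's algorithm: replace (a, b) with
# (b, a % b) until b is 0. A readable, self-contained restatement
# (illustrative names, not the code's actual identifiers):
def gcd_sketch(a: int, b: int) -> int:
    while b != 0:
        a, b = b, a % b
    return a

# trace for (48, 18): (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) => 6
assert gcd_sketch(48, 18) == 6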
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict ) -> List[str]:
'''simple docstring'''
assert (
isinstance(__snake_case , __snake_case )
and isinstance(__snake_case , __snake_case )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
_UpperCAmelCase = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
_UpperCAmelCase = prime_factorization(__snake_case )
_UpperCAmelCase = prime_factorization(__snake_case )
elif numbera == 1 or numbera == 1:
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = max(__snake_case , __snake_case )
_UpperCAmelCase = 0
_UpperCAmelCase = 0
    _UpperCAmelCase = [] # numbers captured in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
_UpperCAmelCase = prime_fac_a.count(__snake_case )
_UpperCAmelCase = prime_fac_a.count(__snake_case )
for _ in range(max(__snake_case , __snake_case ) ):
ans *= n
else:
_UpperCAmelCase = prime_fac_a.count(__snake_case )
for _ in range(__snake_case ):
ans *= n
done.append(__snake_case )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
_UpperCAmelCase = prime_fac_a.count(__snake_case )
for _ in range(__snake_case ):
ans *= n
done.append(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def A ( _UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (n >= 0), "'number' must been a positive int"
_UpperCAmelCase = 0
_UpperCAmelCase = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(__snake_case ):
ans += 1
# precondition
assert isinstance(__snake_case , __snake_case ) and is_prime(
__snake_case ), "'ans' must been a prime number and from type int"
return ans
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
assert (
is_prime(__snake_case ) and is_prime(__snake_case ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
_UpperCAmelCase = p_number_a + 1 # jump to the next number
    _UpperCAmelCase = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(__snake_case ):
number += 1
while number < p_number_a:
ans.append(__snake_case )
number += 1
# fetch the next prime number.
while not is_prime(__snake_case ):
number += 1
# precondition
assert (
isinstance(__snake_case , __snake_case )
and ans[0] != p_number_a
and ans[len(__snake_case ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def A ( _UpperCAmelCase : int ) -> List[Any]:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (n >= 1), "'n' must been int and >= 1"
_UpperCAmelCase = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(__snake_case )
# precondition
assert ans[0] == 1 and ans[len(__snake_case ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def A ( _UpperCAmelCase : Any ) -> str:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (
number > 1
), "'number' must been an int and >= 1"
_UpperCAmelCase = get_divisors(__snake_case )
# precondition
assert (
isinstance(__snake_case , __snake_case )
and (divisors[0] == 1)
and (divisors[len(__snake_case ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def A ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
assert (
isinstance(__snake_case , __snake_case )
and isinstance(__snake_case , __snake_case )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
_UpperCAmelCase = gcd(abs(__snake_case ) , abs(__snake_case ) )
# precondition
assert (
isinstance(__snake_case , __snake_case )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def A ( _UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (n >= 0), "'n' must been a int and >= 0"
_UpperCAmelCase = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def A ( _UpperCAmelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (n >= 0), "'n' must been an int and >= 0"
_UpperCAmelCase = 0
_UpperCAmelCase = 1
_UpperCAmelCase = 1 # this will be return
for _ in range(n - 1 ):
_UpperCAmelCase = ans
ans += fiba
_UpperCAmelCase = tmp
return ans
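# A readable, self-contained restatement of the iterative Fibonacci above
# (illustrative names): two running values, n - 1 update steps, fib(1) = 1.
def fib_sketch(n: int) -> int:
    previous, ans = 1, 1
    for _ in range(n - 1):
        previous, ans = ans, ans + previous
    return ans

assert [fib_sketch(i) for i in range(1, 7)] == [1, 2, 3, 5, 8, 13]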
| 712
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCAmelCase__ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCAmelCase__ = "▁"
# Segments (not really needed)
UpperCAmelCase__ = 0
UpperCAmelCase__ = 1
UpperCAmelCase__ = 2
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
class __lowerCAmelCase ( PreTrainedTokenizerFast ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = '''left'''
UpperCamelCase = XLNetTokenizer
def __init__( self : Any , A : Union[str, Any]=None , A : str=None , A : Tuple=False , A : Tuple=True , A : Any=False , A : List[str]="<s>" , A : List[str]="</s>" , A : Optional[int]="<unk>" , A : Tuple="<sep>" , A : str="<pad>" , A : Dict="<cls>" , A : Dict="<mask>" , A : Optional[Any]=["<eop>", "<eod>"] , **A : Optional[Any] , ) -> str:
"""simple docstring"""
_UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else mask_token
super().__init__(
vocab_file=A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , additional_special_tokens=A , **A , )
_UpperCAmelCase = 3
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
_UpperCAmelCase = False if not self.vocab_file else True
def _lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def _lowerCamelCase ( self : List[str] , A : str , A : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(A):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_UpperCAmelCase = os.path.join(
A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(A):
copyfile(self.vocab_file , A)
return (out_vocab_file,)
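# XLNet places its special tokens at the end (note the "left" padding side
# declared above): a single sequence becomes tokens + [sep] + [cls], and a
# pair becomes tokens_a + [sep] + tokens_b + [sep] + [cls], mirroring the
# build-inputs method above. The token ids below are illustrative values.
SEP_ID_SKETCH, CLS_ID_SKETCH = 4, 3
tokens_a_sketch, tokens_b_sketch = [10, 11], [20]
single_sketch = tokens_a_sketch + [SEP_ID_SKETCH] + [CLS_ID_SKETCH]
pair_sketch = (
    tokens_a_sketch + [SEP_ID_SKETCH] + tokens_b_sketch + [SEP_ID_SKETCH] + [CLS_ID_SKETCH]
)
assert single_sketch == [10, 11, 4, 3]
assert pair_sketch == [10, 11, 4, 20, 4, 3]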
| 639
| 0
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A_ = logging.get_logger(__name__)
class _snake_case ( SequenceFeatureExtractor ):
_A : List[str] = ['input_features', 'is_longer']
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=64 ,SCREAMING_SNAKE_CASE__ : Any=48_000 ,SCREAMING_SNAKE_CASE__ : int=480 ,SCREAMING_SNAKE_CASE__ : Any=10 ,SCREAMING_SNAKE_CASE__ : Dict=1_024 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0 ,SCREAMING_SNAKE_CASE__ : int=False ,SCREAMING_SNAKE_CASE__ : float = 0 ,SCREAMING_SNAKE_CASE__ : float = 14_000 ,SCREAMING_SNAKE_CASE__ : int = None ,SCREAMING_SNAKE_CASE__ : str = "fusion" ,SCREAMING_SNAKE_CASE__ : str = "repeatpad" ,**SCREAMING_SNAKE_CASE__ : Dict ,):
super().__init__(
feature_size=SCREAMING_SNAKE_CASE__ ,sampling_rate=SCREAMING_SNAKE_CASE__ ,padding_value=SCREAMING_SNAKE_CASE__ ,return_attention_mask=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
SCREAMING_SNAKE_CASE:Tuple = top_db
SCREAMING_SNAKE_CASE:Dict = truncation
SCREAMING_SNAKE_CASE:Union[str, Any] = padding
SCREAMING_SNAKE_CASE:str = fft_window_size
SCREAMING_SNAKE_CASE:str = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE:Tuple = hop_length
SCREAMING_SNAKE_CASE:Any = max_length_s
SCREAMING_SNAKE_CASE:Optional[Any] = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE:Any = sampling_rate
SCREAMING_SNAKE_CASE:Optional[int] = frequency_min
SCREAMING_SNAKE_CASE:Union[str, Any] = frequency_max
SCREAMING_SNAKE_CASE:Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=SCREAMING_SNAKE_CASE__ ,min_frequency=SCREAMING_SNAKE_CASE__ ,max_frequency=SCREAMING_SNAKE_CASE__ ,sampling_rate=SCREAMING_SNAKE_CASE__ ,norm=SCREAMING_SNAKE_CASE__ ,mel_scale="htk" ,)
SCREAMING_SNAKE_CASE:int = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=SCREAMING_SNAKE_CASE__ ,min_frequency=SCREAMING_SNAKE_CASE__ ,max_frequency=SCREAMING_SNAKE_CASE__ ,sampling_rate=SCREAMING_SNAKE_CASE__ ,norm="slaney" ,mel_scale="slaney" ,)
def __UpperCamelCase ( self : int ):
SCREAMING_SNAKE_CASE:Optional[Any] = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE:int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __UpperCamelCase ( self : str ,SCREAMING_SNAKE_CASE__ : np.array ,SCREAMING_SNAKE_CASE__ : Optional[np.array] = None ):
SCREAMING_SNAKE_CASE:Optional[int] = spectrogram(
SCREAMING_SNAKE_CASE__ ,window_function(self.fft_window_size ,"hann" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=SCREAMING_SNAKE_CASE__ ,log_mel="dB" ,)
return log_mel_spectrogram.T
def __UpperCamelCase ( self : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int ):
SCREAMING_SNAKE_CASE:Optional[int] = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE:Dict = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE:List[str] = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE:Optional[Any] = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE:List[Any] = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE:Dict = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE:List[str] = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE:List[Any] = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE:Any = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE:int = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE:List[str] = torch.nn.functional.interpolate(
SCREAMING_SNAKE_CASE__ ,size=[chunk_frames, 64] ,mode="bilinear" ,align_corners=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Any = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE:Union[str, Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def __UpperCamelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : np.array ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[Any] ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE:List[str] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE:List[Any] = len(SCREAMING_SNAKE_CASE__ ) - max_length
SCREAMING_SNAKE_CASE:Tuple = np.random.randint(0 ,overflow + 1 )
SCREAMING_SNAKE_CASE:Any = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE:str = self._np_extract_fbank_features(SCREAMING_SNAKE_CASE__ ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE:Any = self._np_extract_fbank_features(SCREAMING_SNAKE_CASE__ ,self.mel_filters )
SCREAMING_SNAKE_CASE:Tuple = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
SCREAMING_SNAKE_CASE:Dict = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE:Tuple = np.stack([mel, mel, mel, mel] ,axis=0 )
SCREAMING_SNAKE_CASE:Union[str, Any] = False
else:
SCREAMING_SNAKE_CASE:Optional[int] = self._random_mel_fusion(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Any = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
SCREAMING_SNAKE_CASE:List[str] = False
        # only use repeat as a new possible value for padding. You repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE:Dict = int(max_length / len(SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE:Any = np.stack(np.tile(SCREAMING_SNAKE_CASE__ ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE:Optional[Any] = int(max_length / len(SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE:Optional[Any] = np.stack(np.tile(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE:Optional[Any] = np.pad(SCREAMING_SNAKE_CASE__ ,(0, max_length - waveform.shape[0]) ,mode="constant" ,constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE:Optional[Any] = self._np_extract_fbank_features(SCREAMING_SNAKE_CASE__ ,self.mel_filters )
SCREAMING_SNAKE_CASE:List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
SCREAMING_SNAKE_CASE:Tuple = self._np_extract_fbank_features(SCREAMING_SNAKE_CASE__ ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,SCREAMING_SNAKE_CASE__ : str = None ,SCREAMING_SNAKE_CASE__ : Optional[str] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None ,**SCREAMING_SNAKE_CASE__ : str ,):
SCREAMING_SNAKE_CASE:Tuple = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE:int = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
SCREAMING_SNAKE_CASE:Tuple = isinstance(SCREAMING_SNAKE_CASE__ ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
SCREAMING_SNAKE_CASE:Dict = is_batched_numpy or (
isinstance(SCREAMING_SNAKE_CASE__ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE:List[Any] = [np.asarray(SCREAMING_SNAKE_CASE__ ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE__ ,np.ndarray ):
SCREAMING_SNAKE_CASE:Optional[Any] = np.asarray(SCREAMING_SNAKE_CASE__ ,dtype=np.floataa )
elif isinstance(SCREAMING_SNAKE_CASE__ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE:Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE:Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE__ )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE:List[Any] = [
self._get_input_mel(SCREAMING_SNAKE_CASE__ ,max_length if max_length else self.nb_max_samples ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE:Tuple = []
SCREAMING_SNAKE_CASE:Optional[Any] = []
for mel, longer in padded_inputs:
input_mel.append(SCREAMING_SNAKE_CASE__ )
is_longer.append(SCREAMING_SNAKE_CASE__ )
if truncation == "fusion" and sum(SCREAMING_SNAKE_CASE__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE:int = np.random.randint(0 ,len(SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE:Union[str, Any] = True
if isinstance(input_mel[0] ,SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE:Dict = [np.asarray(SCREAMING_SNAKE_CASE__ ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE:List[Any] = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE:str = {"input_features": input_mel, "is_longer": is_longer}
SCREAMING_SNAKE_CASE:Optional[Any] = BatchFeature(SCREAMING_SNAKE_CASE__ )
if return_tensors is not None:
SCREAMING_SNAKE_CASE:str = input_features.convert_to_tensors(SCREAMING_SNAKE_CASE__ )
return input_features
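# Shape arithmetic for the "fusion" truncation path above, using the
# constructor defaults read off the signature (illustrative assumptions):
# 10 s of audio at 48 kHz, hop length 480, 64 mel bins, four stacked views.
max_length_sketch = 10 * 48_000                     # max_length_s * sampling_rate
chunk_frames_sketch = max_length_sketch // 480 + 1  # max_length // hop_length + 1
assert chunk_frames_sketch == 1_001
# fused features per clip: (4, 1001, 64); batched model input: (batch, 4, 1001, 64)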
| 143
|
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (boundary[1] - boundary[0]) / steps
__SCREAMING_SNAKE_CASE = boundary[0]
__SCREAMING_SNAKE_CASE = boundary[1]
__SCREAMING_SNAKE_CASE = make_points(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = 0.0
y += (h / 2.0) * f(__UpperCAmelCase )
for i in x_i:
y += h * f(__UpperCAmelCase )
y += (h / 2.0) * f(__UpperCAmelCase )
return y
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = a + h
while x < (b - h):
yield x
__SCREAMING_SNAKE_CASE = x + h
def __magic_name__ ( __UpperCAmelCase ) -> int: # enter your function here
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (x - 0) * (x - 0)
return y
def __magic_name__ ( ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0.0 # Lower bound of integration
__SCREAMING_SNAKE_CASE = 1.0 # Upper bound of integration
__SCREAMING_SNAKE_CASE = 1_0.0 # define number of steps or resolution
__SCREAMING_SNAKE_CASE = [a, b] # define boundary of integration
__SCREAMING_SNAKE_CASE = method_a(__UpperCAmelCase , __UpperCAmelCase )
print(f"""y = {y}""" )
if __name__ == "__main__":
main()
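# Sanity check for the composite trapezoidal rule above: for f(x) = x**2 on
# [0, 1] the exact integral is 1/3, and with h = 0.1 the rule's error is
# about h**2 / 12 ≈ 1.7e-3. A readable, self-contained restatement follows,
# with the integrand passed explicitly (an illustrative interface choice).
def trapezoid_sketch(f, a, b, steps):
    h = (b - a) / steps
    total = (f(a) + f(b)) * h / 2.0
    for i in range(1, int(steps)):
        total += h * f(a + i * h)
    return total

approx_sketch = trapezoid_sketch(lambda x: x * x, 0.0, 1.0, 10.0)
assert abs(approx_sketch - 1.0 / 3.0) < 2e-3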
| 109
| 0
|
import operator
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase = False , UpperCAmelCase = None ):
lowercase__ : Optional[int] = operator.lt if reverse else operator.gt
lowercase__ : Optional[int] = solution or []
if not arr:
return solution
lowercase__ : str = [arr.pop(0 )]
for i, item in enumerate(UpperCAmelCase ):
if _operator(UpperCAmelCase , sublist[-1] ):
sublist.append(UpperCAmelCase )
arr.pop(UpperCAmelCase )
# merging sublist into solution list
if not solution:
solution.extend(UpperCAmelCase )
else:
while sublist:
lowercase__ : Tuple = sublist.pop(0 )
for i, xx in enumerate(UpperCAmelCase ):
if not _operator(UpperCAmelCase , UpperCAmelCase ):
solution.insert(UpperCAmelCase , UpperCAmelCase )
break
else:
solution.append(UpperCAmelCase )
strand_sort(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
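# Design note: strand sort repeatedly peels an already-ordered "strand"
# off the input and merges it into the solution. For [4, 3, 5, 1, 2] the
# strands are [4, 5], then [3], then [1, 2]. Worst case (reverse-sorted
# input) every strand has length 1, giving O(n**2) behavior overall.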
| 716
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__a: List[Any] = logging.getLogger(__name__)
class UpperCAmelCase ( BaseTransformer ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = "sequence-classification"
def __init__( self , __lowerCAmelCase ) -> Optional[Any]:
if type(__lowerCAmelCase ) == dict:
lowercase__ : str = Namespace(**__lowerCAmelCase )
lowercase__ : str = glue_output_modes[hparams.task]
lowercase__ : Dict = glue_tasks_num_labels[hparams.task]
super().__init__(__lowerCAmelCase , __lowerCAmelCase , self.mode )
def _lowerCAmelCase( self , **__lowerCAmelCase ) -> List[str]:
return self.model(**__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> int:
lowercase__ : Any = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowercase__ : str = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
lowercase__ : int = self(**__lowerCAmelCase )
lowercase__ : Optional[Any] = outputs[0]
lowercase__ : List[str] = self.trainer.lr_schedulers[0]['''scheduler''']
lowercase__ : str = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _lowerCAmelCase( self ) -> Union[str, Any]:
lowercase__ : int = self.hparams
lowercase__ : Tuple = processors[args.task]()
lowercase__ : List[str] = processor.get_labels()
for mode in ["train", "dev"]:
lowercase__ : Union[str, Any] = self._feature_file(__lowerCAmelCase )
if os.path.exists(__lowerCAmelCase ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __lowerCAmelCase )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
lowercase__ : Union[str, Any] = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
lowercase__ : List[Any] = convert_examples_to_features(
__lowerCAmelCase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('''Saving features into cached file %s''' , __lowerCAmelCase )
torch.save(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False ) -> DataLoader:
lowercase__ : Dict = '''dev''' if mode == '''test''' else mode
lowercase__ : List[str] = self._feature_file(__lowerCAmelCase )
logger.info('''Loading features from cached file %s''' , __lowerCAmelCase )
lowercase__ : Dict = torch.load(__lowerCAmelCase )
lowercase__ : List[Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase__ : List[Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
lowercase__ : Dict = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
lowercase__ : str = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
lowercase__ : Tuple = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) , batch_size=__lowerCAmelCase , shuffle=__lowerCAmelCase , )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
lowercase__ : Union[str, Any] = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowercase__ : Union[str, Any] = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
lowercase__ : List[Any] = self(**__lowerCAmelCase )
lowercase__ , lowercase__ : int = outputs[:2]
lowercase__ : List[str] = logits.detach().cpu().numpy()
lowercase__ : Any = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCAmelCase( self , __lowerCAmelCase ) -> tuple:
lowercase__ : List[Any] = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
lowercase__ : Dict = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
lowercase__ : Any = np.argmax(__lowerCAmelCase , axis=1 )
elif self.hparams.glue_output_mode == "regression":
lowercase__ : Optional[Any] = np.squeeze(__lowerCAmelCase )
lowercase__ : Optional[Any] = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
lowercase__ : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0] )]
lowercase__ : int = [[] for _ in range(out_label_ids.shape[0] )]
lowercase__ : List[Any] = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , __lowerCAmelCase , __lowerCAmelCase )}
lowercase__ : int = dict(results.items() )
lowercase__ : Optional[int] = results
return ret, preds_list, out_label_list
def _lowerCAmelCase( self , __lowerCAmelCase ) -> dict:
lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(__lowerCAmelCase )
lowercase__ : List[Any] = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCAmelCase( self , __lowerCAmelCase ) -> dict:
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = self._eval_end(__lowerCAmelCase )
lowercase__ : Union[str, Any] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowerCAmelCase( __lowerCAmelCase , __lowerCAmelCase ) -> str:
BaseTransformer.add_model_specific_args(__lowerCAmelCase , __lowerCAmelCase )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__lowerCAmelCase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--task''' , default='''''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''The GLUE task to run''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__lowerCAmelCase , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results", f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
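    # Illustrative launch command (a sketch, not part of the original script:
    # the script name, model name and data paths below are assumptions, and the
    # `add_generic_args`/`generic_train` helpers are expected to come from the
    # surrounding lightning_base module):
    #
    #   python run_pl_glue.py --model_name_or_path bert-base-cased --task mrpc \
    #       --data_dir ./glue_data/MRPC --output_dir ./results --do_train --do_predict --gpus 1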
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = "\\n\n"
_DESCRIPTION = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n            This includes models such as gpt2, causal variations of bert,\n            causal versions of t5, and more (the full list can be found\n            in the AutoModelForCausalLM documentation here:\n            https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to 'cuda' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        78.22\n        >>> print(round(results[\"perplexities\"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = datasets.load_dataset(\"wikitext\",\n        ...                                     \"wikitext-2-raw-v1\",\n        ...                                     split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!='']\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        60.35\n        >>> print(round(results[\"perplexities\"][0], 2))\n        81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'input_texts': datasets.Value('string'),
}), reference_urls=['https://huggingface.co/docs/transformers/perplexity'], )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
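# Minimal usage sketch, mirroring Example 1 from the docstring above (assumes
# network access the first time gpt2 is downloaded; the texts are illustrative):
if __name__ == "__main__":
    perplexity = datasets.load_metric("perplexity")
    results = perplexity.compute(
        model_id="gpt2", add_start_token=False, input_texts=["lorem ipsum", "Happy Birthday!"]
    )
    print(results["mean_perplexity"], results["perplexities"][0])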
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    """Configuration class for the MVP model."""

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50267, max_position_embeddings=1024,
        encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16,
        decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        activation_function="gelu", d_model=1024,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, classifier_dropout=0.0,
        scale_embedding=False, use_cache=True,
        pad_token_id=1, bos_token_id=0, eos_token_id=2,
        is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2,
        use_prompt=False, prompt_length=100, prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
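# Minimal sanity-check sketch (illustrative, not part of the original module):
# builds a small config and reads two attributes through the attribute_map
# aliases declared above.
if __name__ == "__main__":
    cfg = MvpConfig(encoder_layers=2, decoder_layers=2)
    print(cfg.hidden_size, cfg.num_attention_heads)  # -> 1024 16 (aliases of d_model / encoder_attention_heads)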
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank, training_args.device, training_args.n_gpu,
        bool(training_args.local_rank != -1), training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir,
    )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
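    # Illustrative launch command (a sketch; the paths below are assumptions,
    # and "swag" is one of the tasks registered in utils_multiple_choice.processors):
    #
    #   python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-uncased \
    #       --data_dir ./data/swag --output_dir ./swag_out --do_train --do_eval --max_seq_length 80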
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
def depth_first_search(grid: list, row: int, col: int, visit: set) -> int:
    """Count the unique simple paths from the top-left to the bottom-right cell,
    where a cell value of 1 marks a wall."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
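# Illustrative check (the maze below is an assumption, not a fixture from the
# original file): with the centre cell blocked there are exactly two simple
# paths around the ring from the top-left to the bottom-right corner.
if __name__ == "__main__":
    maze = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    print(depth_first_search(maze, 0, 0, set()))  # -> 2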
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier: str) -> list:
    """Split a camel-cased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text: str, width: int) -> str:
    """Center `text` inside a table cell of the given `width`."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules() -> str:
    """Generates an up-to-date model table from the content of the auto modules."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"

    return table
def check_model_table(overwrite=False):
    """Check the model table in the index.md is consistent with the state of the lib and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
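    # Illustrative invocations (assumes running from the repository root, as
    # noted in the comment at the top of this script):
    #
    #   python utils/check_table.py                      # error out if the table is stale
    #   python utils/check_table.py --fix_and_overwrite  # rewrite index.md in place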
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str, starting_point: complex, variable: str = "x", precision: float = 10**-10, multiplicity: int = 1,
) -> complex:
    """Find the root of the given function using the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(F'''The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}''')
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F'''{newton_raphson("log(y) - 1", 2, variable="y")}''',
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F'''{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}''',
)
# Find root of cos(x)
print(F'''The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}''')
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Creates a list of PIL images from random NumPy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
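# Note (illustrative; the package path below assumes the usual transformers
# layout for this __init__ file): because of _LazyModule, importing the package
# does not pull in torch -- the heavy symbols are only resolved on first access:
#
#   from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseConfig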
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Class Vertex."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm; runs in O(mn) with `m` edges and `n` vertices."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex):
    """Prim's algorithm with a min heap; runs in O((m + n) log n)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
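    # Small worked example (an assumption, not a fixture from this file): a
    # weighted triangle where both variants keep the two lightest edges (1 and 2).
    triangle_graph = [Vertex(n) for n in range(3)]
    connect(triangle_graph, 1, 2, 1)
    connect(triangle_graph, 2, 3, 2)
    connect(triangle_graph, 1, 3, 3)
    print(prim(triangle_graph, triangle_graph[0]))             # [(2, 1), (3, 2)]
    print(list(prim_heap(triangle_graph, triangle_graph[0])))  # [(2, 1), (3, 2)]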
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.array) -> np.array:
    """Implements the tanh function: f(x) = (2 / (1 + e^(-2x))) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
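    # Quick illustrative check (approximate values): tanh is odd and bounded
    # in (-1, 1).
    print(tangent_hyperbolic(np.array([-2.0, 0.0, 2.0])))  # ~[-0.964  0.     0.964]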
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validate the issuer prefix of the credit card number."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Perform the Luhn checksum validation."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validate the given credit card number."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
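    # Worked Luhn example (illustrative, calling luhn_validation directly so the
    # length and prefix checks are skipped): in "59" the doubled 5 becomes
    # 10 -> 1 + 0 = 1, and 1 + 9 = 10 is divisible by 10, so the checksum passes.
    print(luhn_validation("59"))  # True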
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    """Output class for Stable Diffusion pipelines."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('>=', '0.0.12')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        """Output class for Flax Stable Diffusion pipelines."""

        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )

        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
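# Illustrative first-order low-pass sketch: the coefficients are hand-picked
# assumptions (a simple exponential smoother), not values from the original file.
if __name__ == "__main__":
    lp = IIRFilter(1)
    lp.set_coefficients([1.0, -0.9], [0.1, 0.0])  # y[n] = 0.1 x[n] + 0.9 y[n-1]
    print([round(lp.process(1.0), 3) for _ in range(5)])  # [0.1, 0.19, 0.271, 0.344, 0.41]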
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
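# Shape sketch (hyper-parameters below are illustrative assumptions, not values
# from any released checkpoint): encodes 16 note tokens into 768-dim features.
if __name__ == "__main__":
    enc = SpectrogramNotesEncoder(
        max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
        num_layers=2, num_heads=12, d_kv=64, d_ff=2048, feed_forward_proj="gated-gelu",
    )
    tokens = torch.randint(0, 1536, (1, 16))
    mask = torch.ones(1, 16, dtype=torch.long)
    out, out_mask = enc(tokens, mask)
    print(out.shape)  # torch.Size([1, 16, 768])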
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2, pad_token_id: int = 1, bos_token_id: int = 0, eos_token_id: int = 2,
        vocab_size: int = 30522, hidden_size: int = 768,
        num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072,
        hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512, type_vocab_size: int = 2,
        initializer_range: float = 0.02, layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        """Constructs LongformerConfig."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
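
# --- Added usage sketch (not part of the original module). It shows how the ONNX config
# above is typically driven; the checkpoint name and the fixed batch/sequence sizes are
# illustrative assumptions, not values taken from this file.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    config = LongformerConfig()
    onnx_config = LongformerOnnxConfig(config)
    tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
    dummy_inputs = onnx_config.generate_dummy_inputs(
        tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
    )
    # every second token was marked global by generate_dummy_inputs above
    print({name: tensor.shape for name, tensor in dummy_inputs.items()})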
| 82
|
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 693
| 0
|
'''simple docstring'''
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 338
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 338
| 1
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCAmelCase_ : Optional[int] = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 570
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
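
# --- Added illustrative sketch (not in the original script): how `get_tfds` is meant to be
# called. The CSV paths and the BERT checkpoint are hypothetical; the function is wrapped in
# a helper that is never invoked, so importing this module stays side-effect free.
def _example_get_tfds():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    train_ds, val_ds, test_ds, label2id = get_tfds(
        train_file="train.csv",  # hypothetical CSV with the label in column 0
        eval_file="dev.csv",
        test_file=None,
        tokenizer=tokenizer,
        label_column_id=0,
        max_seq_length=128,
    )
    return train_ds, val_ds, test_ds, label2id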
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")

            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results
if __name__ == "__main__":
main()
| 570
| 1
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
| 715
|
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 0
| 0
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}


class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
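
# --- Added usage sketch (not part of the original module), following the documented DPR
# reader flow: encode a question against (title, text) passages, run the reader model, and
# decode the best answer spans. The checkpoint name matches the public DPR reader weights.
if __name__ == "__main__":
    import torch
    from transformers import DPRReader

    tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
    model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
    encoded_inputs = tokenizer(
        questions=["What is love?"],
        titles=["Haddaway"],
        texts=["'What Is Love' is a song recorded by the artist Haddaway"],
        return_tensors="pt",
    )
    with torch.no_grad():
        outputs = model(**encoded_inputs)
    predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
    print(predicted_spans[0].text)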
| 12
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.",
    )
    parser.add_argument(
        "-n", "--images_num", type=int, default=4, help="How much images to generate.",
    )
    parser.add_argument(
        "-s", "--seed", type=int, default=42, help="Seed for random process.",
    )
    parser.add_argument(
        "-ci", "--cuda_id", type=int, default=0, help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 64
| 0
|
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml

logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 711
|
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result
def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is greater than or equal to the length of the pattern string,
        # that index is the starting position of a substring
        # which is equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
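    # Added usage sketch: concrete calls demonstrating the Z-array and pattern counting.
    print(z_function("abracadabra"))  # [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
    print(find_pattern("abr", "abracadabra"))  # 2 (occurrences at indices 0 and 7)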
| 402
| 0
|
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
__UpperCAmelCase =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write an accelerate config file that all tests in the class share.
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.get_auto_remove_tmp_dir()
A__ = f"""\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n """.split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
A__ = get_results(__snake_case )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(__snake_case , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__snake_case , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.get_auto_remove_tmp_dir()
A__ = f"""\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n """.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
A__ = get_results(__snake_case )
self.assertLess(result["perplexity"] , 1_00 )
self.assertTrue(os.path.exists(os.path.join(__snake_case , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__snake_case , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.get_auto_remove_tmp_dir()
A__ = f"""\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n """.split()
run_command(self._launch_args + testargs )
A__ = get_results(__snake_case )
self.assertLess(result["perplexity"] , 42 )
self.assertTrue(os.path.exists(os.path.join(__snake_case , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__snake_case , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowercase_ ( self ):
'''simple docstring'''
A__ = 7 if get_gpu_count() > 1 else 2
A__ = self.get_auto_remove_tmp_dir()
A__ = f"""\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n """.split()
run_command(self._launch_args + testargs )
A__ = get_results(__snake_case )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertLess(result["train_loss"] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__snake_case , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__snake_case , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.get_auto_remove_tmp_dir()
A__ = f"""\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n """.split()
run_command(self._launch_args + testargs )
A__ = get_results(__snake_case )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 28 )
self.assertGreaterEqual(result["eval_exact"] , 28 )
self.assertTrue(os.path.exists(os.path.join(__snake_case , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__snake_case , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.get_auto_remove_tmp_dir()
A__ = f"""\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n """.split()
run_command(self._launch_args + testargs )
A__ = get_results(__snake_case )
self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__snake_case , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.get_auto_remove_tmp_dir()
A__ = f"""\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n """.split()
run_command(self._launch_args + testargs )
A__ = get_results(__snake_case )
self.assertGreaterEqual(result["eval_rouge1"] , 10 )
self.assertGreaterEqual(result["eval_rouge2"] , 2 )
self.assertGreaterEqual(result["eval_rougeL"] , 7 )
self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
self.assertTrue(os.path.exists(os.path.join(__snake_case , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__snake_case , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.get_auto_remove_tmp_dir()
A__ = f"""\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n """.split()
run_command(self._launch_args + testargs )
A__ = get_results(__snake_case )
self.assertGreaterEqual(result["eval_bleu"] , 30 )
self.assertTrue(os.path.exists(os.path.join(__snake_case , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__snake_case , "translation_no_trainer" ) ) )
@slow
def lowercase_ ( self ):
'''simple docstring'''
A__ = logging.StreamHandler(sys.stdout )
logger.addHandler(__snake_case )
A__ = self.get_auto_remove_tmp_dir()
A__ = f"""\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n """.split()
run_command(self._launch_args + testargs )
A__ = get_results(__snake_case )
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.get_auto_remove_tmp_dir()
A__ = f"""\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n """.split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
A__ = get_results(__snake_case )
# The base model scores a 25%
self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__snake_case , "step_1" ) ) )
self.assertTrue(os.path.exists(os.path.join(__snake_case , "image_classification_no_trainer" ) ) )
| 337
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
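# Minimal usage sketch (illustrative): compose a config from two sub-configs,
# then note the decoder flags set by the classmethod above.
#
#     from transformers import BertConfig, EncoderDecoderConfig
#
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention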
| 242
| 0
|
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
"""simple docstring"""
@require_beam
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : List[Any] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_a : int = DummyBeamDataset(cache_dir=_SCREAMING_SNAKE_CASE ,beam_runner='DirectRunner' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_SCREAMING_SNAKE_CASE ,builder.name ,'default' ,'0.0.0' ,F"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features ,datasets.Features({'content': datasets.Value('string' )} ) )
_a : List[str] = builder.as_dataset()
self.assertEqual(dset['train'].num_rows ,_SCREAMING_SNAKE_CASE )
self.assertEqual(dset['train'].info.splits['train'].num_examples ,_SCREAMING_SNAKE_CASE )
self.assertDictEqual(dset['train'][0] ,get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['train'][expected_num_examples - 1] ,get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE ,builder.name ,'default' ,'0.0.0' ,'dataset_info.json' ) ) )
del dset
@require_beam
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
import apache_beam as beam
_a : int = beam.io.parquetio.WriteToParquet
_a : List[str] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_a : Optional[Any] = DummyBeamDataset(cache_dir=_SCREAMING_SNAKE_CASE ,beam_runner='DirectRunner' )
with patch('apache_beam.io.parquetio.WriteToParquet' ) as write_parquet_mock:
_a : Optional[Any] = partial(_SCREAMING_SNAKE_CASE ,num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
_SCREAMING_SNAKE_CASE ,builder.name ,'default' ,'0.0.0' ,F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
_SCREAMING_SNAKE_CASE ,builder.name ,'default' ,'0.0.0' ,F"""{builder.name}-train-00001-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features ,datasets.Features({'content': datasets.Value('string' )} ) )
_a : int = builder.as_dataset()
self.assertEqual(dset['train'].num_rows ,_SCREAMING_SNAKE_CASE )
self.assertEqual(dset['train'].info.splits['train'].num_examples ,_SCREAMING_SNAKE_CASE )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['train']['content'] ) ,sorted(['foo', 'bar', 'foobar'] ) )
self.assertTrue(
os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE ,builder.name ,'default' ,'0.0.0' ,'dataset_info.json' ) ) )
del dset
@require_beam
def __lowercase ( self : Dict ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_a : List[Any] = DummyBeamDataset(cache_dir=_SCREAMING_SNAKE_CASE )
self.assertRaises(datasets.builder.MissingBeamOptions ,builder.download_and_prepare )
@require_beam
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Tuple = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_a : int = NestedBeamDataset(cache_dir=_SCREAMING_SNAKE_CASE ,beam_runner='DirectRunner' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_SCREAMING_SNAKE_CASE ,builder.name ,'default' ,'0.0.0' ,F"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(
builder.info.features ,datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) )
_a : str = builder.as_dataset()
self.assertEqual(dset['train'].num_rows ,_SCREAMING_SNAKE_CASE )
self.assertEqual(dset['train'].info.splits['train'].num_examples ,_SCREAMING_SNAKE_CASE )
self.assertDictEqual(dset['train'][0] ,get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['train'][expected_num_examples - 1] ,get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE ,builder.name ,'default' ,'0.0.0' ,'dataset_info.json' ) ) )
del dset
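# Sketch of the pattern these tests exercise (requires `apache_beam`; the cache
# directory is illustrative). The DirectRunner executes the pipeline locally:
#
#     builder = DummyBeamDataset(cache_dir="/tmp/beam_cache", beam_runner="DirectRunner")
#     builder.download_and_prepare()
#     dset = builder.as_dataset()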
| 709
|
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Return the JSON record for an Open Library olid, e.g. 'isbn/0140328726'."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException
            print(f"Sorry, there are no results for ISBN: {isbn}.")
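    # Example session (requires network access to openlibrary.org; output keys
    # follow `desired_keys` above):
    #     data = summarize_book(get_openlibrary_data("isbn/0140328726"))
    #     print(data["Title"])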
| 319
| 0
|
"""simple docstring"""
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    # Convert angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
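    # Worked example (values are illustrative): rect(100, 0) * rect(5, -30 deg)
    # is approximately (433.0 - 250.0j), i.e. |S| = 100 * 5 = 500 volt-amperes.
    print(apparent_power(100, 5, 0, -30))  # ~ (433.013-250j)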
| 182
|
"""simple docstring"""
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
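# Usage sketch (the webhook URL is a placeholder, not a real endpoint):
#     send_slack_message("deploy finished", "https://hooks.slack.com/services/T000/B000/XXXX")
# Slack incoming webhooks accept a JSON payload of the form {"text": "..."}.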
| 182
| 1
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
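# Example of the custom doctest flag registered above (a sketch): appending
# `# doctest: +IGNORE_RESULT` makes the checker accept any output for that line.
#
#     >>> import random
#     >>> random.random()  # doctest: +IGNORE_RESULT
#     0.123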
| 704
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "encoder.layernorm.weight"
    if name == "encoder.norm.bias":
        name = "encoder.layernorm.bias"
    return name
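# Example of the renaming above (the key is illustrative):
#     rename_key("encoder.model.layers.0.blocks.0.attn.proj.weight")
# returns "encoder.encoder.layers.0.blocks.0.attention.output.dense.weight".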
def lowerCAmelCase_ ( __UpperCAmelCase: str , __UpperCAmelCase: str ) -> Dict:
for key in orig_state_dict.copy().keys():
UpperCamelCase__ : Dict = orig_state_dict.pop(__UpperCAmelCase )
if "qkv" in key:
UpperCamelCase__ : Tuple = key.split('''.''' )
UpperCamelCase__ : str = int(key_split[3] )
UpperCamelCase__ : List[Any] = int(key_split[5] )
UpperCamelCase__ : Optional[Any] = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCamelCase__ : Tuple = val[:dim, :]
UpperCamelCase__ : Dict = val[dim : dim * 2, :]
UpperCamelCase__ : Dict = val[-dim:, :]
else:
UpperCamelCase__ : Optional[int] = val[:dim]
UpperCamelCase__ : str = val[dim : dim * 2]
UpperCamelCase__ : int = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
UpperCamelCase__ : Any = val
return orig_state_dict
def lowerCAmelCase_ ( __UpperCAmelCase: Optional[Any] , __UpperCAmelCase: List[Any]=None , __UpperCAmelCase: Tuple=False ) -> Optional[int]:
# load original model
UpperCamelCase__ : List[Any] = DonutModel.from_pretrained(__UpperCAmelCase ).eval()
# load HuggingFace model
UpperCamelCase__ ,UpperCamelCase__ : List[str] = get_configs(__UpperCAmelCase )
UpperCamelCase__ : int = DonutSwinModel(__UpperCAmelCase )
UpperCamelCase__ : Optional[Any] = MBartForCausalLM(__UpperCAmelCase )
UpperCamelCase__ : Optional[int] = VisionEncoderDecoderModel(encoder=__UpperCAmelCase , decoder=__UpperCAmelCase )
model.eval()
UpperCamelCase__ : List[Any] = original_model.state_dict()
UpperCamelCase__ : List[Any] = convert_state_dict(__UpperCAmelCase , __UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
# verify results on scanned document
UpperCamelCase__ : Optional[int] = load_dataset('''hf-internal-testing/example-documents''' )
UpperCamelCase__ : Any = dataset['''test'''][0]['''image'''].convert('''RGB''' )
UpperCamelCase__ : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained(__UpperCAmelCase , from_slow=__UpperCAmelCase )
UpperCamelCase__ : Dict = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
UpperCamelCase__ : Any = DonutProcessor(__UpperCAmelCase , __UpperCAmelCase )
UpperCamelCase__ : str = processor(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
UpperCamelCase__ : Optional[int] = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
UpperCamelCase__ : int = '''When is the coffee break?'''
UpperCamelCase__ : List[str] = task_prompt.replace('''{user_input}''' , __UpperCAmelCase )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
UpperCamelCase__ : int = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
UpperCamelCase__ : Optional[int] = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
UpperCamelCase__ : Any = '''s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
UpperCamelCase__ : List[Any] = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
UpperCamelCase__ : Dict = '''hello world'''
else:
raise ValueError('''Model name not supported''' )
UpperCamelCase__ : Tuple = original_model.decoder.tokenizer(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors='''pt''' )[
'''input_ids'''
]
UpperCamelCase__ : List[str] = original_model.encoder.model.patch_embed(__UpperCAmelCase )
UpperCamelCase__ ,UpperCamelCase__ : List[str] = model.encoder.embeddings(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 )
# verify encoder hidden states
UpperCamelCase__ : Dict = original_model.encoder(__UpperCAmelCase )
UpperCamelCase__ : Optional[Any] = model.encoder(__UpperCAmelCase ).last_hidden_state
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-2 )
# verify decoder hidden states
UpperCamelCase__ : Optional[Any] = original_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).logits
UpperCamelCase__ : str = model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase ).logits
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
if push_to_hub:
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
UpperCAmelCase_ = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
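# Example invocation of the argparse interface above (the script filename and
# output path are assumptions):
#     python convert_donut_to_pytorch.py --model_name naver-clova-ix/donut-base \
#         --pytorch_dump_folder_path ./donut-base-hf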
| 369
| 0
|
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')


@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)
        mask_token = AddedToken('<mask>', lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token})
        tokenizer.add_tokens(['<ctc_blank>'])
        tokenizer.save_pretrained(self.tmpdirname)
def __UpperCamelCase ( self , lowercase_) -> str:
a__ ='this is a test'
a__ ='this is a test'
return input_text, output_text
def __UpperCamelCase ( self , lowercase_ , lowercase_=False , lowercase_=20 , lowercase_=5) -> str:
a__ , a__ =self.get_input_output_texts(lowercase_)
a__ =tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
a__ =tokenizer.decode(lowercase_ , clean_up_tokenization_spaces=lowercase_)
return text, ids
def __UpperCamelCase ( self) -> str:
a__ ='<pad>'
a__ =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_) , lowercase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_) , lowercase_)
def __UpperCamelCase ( self) -> str:
a__ =list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-4] , 'œ')
self.assertEqual(vocab_keys[-2] , '<mask>')
self.assertEqual(vocab_keys[-1] , '<ctc_blank>')
self.assertEqual(len(lowercase_) , 81)
def __UpperCamelCase ( self) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 79)
def __UpperCamelCase ( self) -> str:
a__ =self.get_tokenizers(do_lower_case=lowercase_)
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
a__ =tokenizer.vocab_size
a__ =len(lowercase_)
self.assertNotEqual(lowercase_ , 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
a__ =['aaaaa bbbbbb', 'cccccccccdddddddd']
a__ =tokenizer.add_tokens(lowercase_)
a__ =tokenizer.vocab_size
a__ =len(lowercase_)
self.assertNotEqual(lowercase_ , 0)
self.assertEqual(lowercase_ , lowercase_)
self.assertEqual(lowercase_ , len(lowercase_))
self.assertEqual(lowercase_ , all_size + len(lowercase_))
a__ =tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=lowercase_)
self.assertGreaterEqual(len(lowercase_) , 4)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
a__ ={'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
a__ =tokenizer.add_special_tokens(lowercase_)
a__ =tokenizer.vocab_size
a__ =len(lowercase_)
self.assertNotEqual(lowercase_ , 0)
self.assertEqual(lowercase_ , lowercase_)
self.assertEqual(lowercase_ , len(lowercase_))
self.assertEqual(lowercase_ , all_size_a + len(lowercase_))
a__ =tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=lowercase_)
self.assertGreaterEqual(len(lowercase_) , 6)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[0] , tokens[1])
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokens[-4])
self.assertEqual(tokens[0] , tokenizer.eos_token_id)
self.assertEqual(tokens[-3] , tokenizer.pad_token_id)
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
def __UpperCamelCase ( self) -> Optional[Any]:
pass
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =self.get_tokenizer()
a__ =tokenizer.tokenize('This is a test')
# fmt: off
self.assertListEqual(lowercase_ , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
a__ =tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowercase_ , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
a__ =tokenizer.convert_tokens_to_ids(lowercase_)
# fmt: off
self.assertListEqual(lowercase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
# fmt: on
a__ =tokenizer.convert_ids_to_tokens(lowercase_)
self.assertListEqual(
lowercase_ , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
@slow
def __UpperCamelCase ( self) -> Optional[int]:
# Use custom sequence because this tokenizer does not handle numbers.
a__ =[
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
a__ ={
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=lowercase_ , )
| 20
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
set_seed(7_70)
_lowerCAmelCase = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
_lowerCAmelCase = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
_lowerCAmelCase = os.path.dirname(os.path.abspath(__file__))
_lowerCAmelCase = os.path.join(os.path.expanduser('~'), '.cache')
_lowerCAmelCase = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def UpperCamelCase ( _A , _A=False ) -> Dict:
lowercase : Union[str, Any] = model_type
if use_small:
key += "_small"
return os.path.join(_A , REMOTE_MODEL_PATHS[key]["""file_name"""] )
def UpperCamelCase ( _A , _A ) -> List[Any]:
os.makedirs(_A , exist_ok=_A )
hf_hub_download(repo_id=_A , filename=_A , local_dir=_A )
def UpperCamelCase ( _A , _A , _A=False , _A="text" ) -> List[Any]:
if model_type == "text":
lowercase : Any = BarkSemanticModel
lowercase : List[Any] = BarkSemanticConfig
lowercase : List[Any] = BarkSemanticGenerationConfig
elif model_type == "coarse":
lowercase : Tuple = BarkCoarseModel
lowercase : str = BarkCoarseConfig
lowercase : Optional[int] = BarkCoarseGenerationConfig
elif model_type == "fine":
lowercase : str = BarkFineModel
lowercase : Optional[Any] = BarkFineConfig
lowercase : Optional[int] = BarkFineGenerationConfig
else:
raise NotImplementedError()
lowercase : Optional[int] = F"""{model_type}_small""" if use_small else model_type
lowercase : str = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(_A ):
logger.info(F"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["""repo_id"""] , model_info["""file_name"""] )
lowercase : Dict = torch.load(_A , map_location=_A )
# this is a hack
lowercase : int = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
lowercase : str = model_args["""vocab_size"""]
lowercase : Optional[Any] = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
lowercase : Optional[int] = model_args.pop("""n_head""" )
lowercase : Union[str, Any] = model_args.pop("""n_embd""" )
lowercase : Dict = model_args.pop("""n_layer""" )
lowercase : List[str] = ConfigClass(**checkpoint["""model_args"""] )
lowercase : Dict = ModelClass(config=_A )
lowercase : List[str] = GenerationConfigClass()
lowercase : int = model_generation_config
lowercase : List[Any] = checkpoint["""model"""]
# fixup checkpoint
lowercase : Union[str, Any] = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(_A ):
# replace part of the key with corresponding layer name in HF implementation
lowercase : List[str] = k[len(_A ) :]
for old_layer_name in new_layer_name_dict:
lowercase : int = new_k.replace(_A , new_layer_name_dict[old_layer_name] )
lowercase : List[Any] = state_dict.pop(_A )
lowercase : Dict = set(state_dict.keys() ) - set(model.state_dict().keys() )
lowercase : Dict = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
lowercase : Optional[int] = set(model.state_dict().keys() ) - set(state_dict.keys() )
lowercase : Dict = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(_A ) != 0:
raise ValueError(F"""extra keys found: {extra_keys}""" )
if len(_A ) != 0:
raise ValueError(F"""missing keys: {missing_keys}""" )
model.load_state_dict(_A , strict=_A )
lowercase : Union[str, Any] = model.num_parameters(exclude_embeddings=_A )
lowercase : Union[str, Any] = checkpoint["""best_val_loss"""].item()
logger.info(F"""model loaded: {round(n_params/1e6 , 1 )}M params, {round(_A , 3 )} loss""" )
model.eval()
model.to(_A )
del checkpoint, state_dict
return model
def UpperCamelCase ( _A , _A=False , _A="text" ) -> Dict:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
lowercase : Optional[Any] = """cpu""" # do conversion on cpu
lowercase : Dict = _get_ckpt_path(_A , use_small=_A )
lowercase : Any = _load_model(_A , _A , model_type=_A , use_small=_A )
# load bark initial model
lowercase : Tuple = _bark_load_model(_A , """cpu""" , model_type=_A , use_small=_A )
if model_type == "text":
lowercase : str = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=_A ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
lowercase : Union[str, Any] = 5
lowercase : Tuple = 10
if model_type in ["text", "coarse"]:
lowercase : Any = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
lowercase : Optional[Any] = bark_model(_A )[0]
lowercase : Tuple = model(_A )
# take last logits
lowercase : int = output_new_model_total.logits[:, [-1], :]
else:
lowercase : Dict = 3
lowercase : Any = 8
lowercase : str = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
lowercase : str = model(_A , _A )
lowercase : List[Any] = bark_model(_A , _A )
lowercase : Any = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(_A ).mkdir(exist_ok=_A )
model.save_pretrained(_A )
def UpperCamelCase ( _A , _A , _A , _A , _A , _A , ) -> Optional[int]:
lowercase : str = os.path.join(_A , _A )
lowercase : Dict = BarkSemanticConfig.from_pretrained(os.path.join(_A , """config.json""" ) )
lowercase : List[Any] = BarkCoarseConfig.from_pretrained(os.path.join(_A , """config.json""" ) )
lowercase : List[Any] = BarkFineConfig.from_pretrained(os.path.join(_A , """config.json""" ) )
lowercase : List[Any] = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
lowercase : Any = BarkSemanticModel.from_pretrained(_A )
lowercase : str = BarkCoarseModel.from_pretrained(_A )
lowercase : int = BarkFineModel.from_pretrained(_A )
lowercase : Optional[Any] = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
lowercase : int = BarkConfig.from_sub_model_configs(
_A , _A , _A , _A )
lowercase : Dict = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
lowercase : str = BarkModel(_A )
lowercase : int = semantic
lowercase : List[Any] = coarseAcoustic
lowercase : Union[str, Any] = fineAcoustic
lowercase : Any = codec
lowercase : int = bark_generation_config
Path(_A ).mkdir(exist_ok=_A )
bark.save_pretrained(_A , repo_id=_A , push_to_hub=_A )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
_lowerCAmelCase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
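# Example invocation of the argparse interface above (the script filename is an
# assumption; `text` selects the semantic model, --is_small the small weights):
#     python convert_suno_to_hf.py text ./bark-text --is_small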
| 264
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"


class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
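
# A minimal usage sketch of the fast tokenizer above (assumes network access to
# the `google/rembert` checkpoint, or a local directory with tokenizer files):
#
#     tok = RemBertTokenizerFast.from_pretrained("google/rembert")
#     enc = tok("first segment", "second segment")
#     # build_inputs_with_special_tokens yields [CLS] ids_0 [SEP] ids_1 [SEP],
#     # and create_token_type_ids_from_sequences marks the second segment with 1s.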
| 701
|
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to the given level."""

    def brightness(c: int) -> float:
        # Fundamental brightness transformation: shift every channel value by `level`.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
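
# A quick worked example of the point function above: with level = 100, a pixel
# channel c = 50 maps to 128 + 100 + (50 - 128) = 150, i.e. every channel is
# shifted up by `level`. A defensively clamped variant (a sketch, not part of
# the original algorithm, for when you do not want to rely on Pillow's own
# lookup-table handling of out-of-range values):
#
#     def brightness_clamped(c: int, level: float = 100) -> int:
#         return max(0, min(255, int(128 + level + (c - 128))))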
| 144
| 0
|
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursively search for `key` in `list_data`, checking both ends of the
    current window; returns the index of `key`, or -1 if it is absent."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
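
# A minimal usage sketch of the two-ended search above (the list does not need
# to be sorted, since the window shrinks from both ends one step at a time):
#
#     search([1, 4, 7, 18, 3], 7)   # -> 2
#     search([1, 4, 7, 18, 3], 99)  # -> -1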
| 130
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.

    Returns: ConvertCommand
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers-cli.

        Args:
            parser: Root parser to register command-specific arguments.
        """
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
UpperCAmelCase_= self._tf_checkpoint
UpperCAmelCase_= """"""
else:
UpperCAmelCase_= self._tf_checkpoint
UpperCAmelCase_= """"""
convert_transfo_xl_checkpoint_to_pytorch(
__UpperCAmelCase , self._config , self._pytorch_dump_output , __UpperCAmelCase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, lxmert, rembert, t5, "
                "transfo_xl, xlm, xlnet]"
            )
| 593
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
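
# A minimal usage sketch: the defaults reproduce the original GPT-1 sizes, and
# `attribute_map` lets generic names resolve to the GPT-specific ones:
#
#     config = OpenAIGPTConfig(n_layer=6, n_head=8)
#     config.hidden_size        # -> 768, forwarded to n_embd via attribute_map
#     config.num_hidden_layers  # -> 6, forwarded to n_layer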
| 585
|
from __future__ import annotations
import math
class SegmentTree:
    """Segment tree with lazy propagation, supporting range-assign updates and
    range-max queries over a 1-indexed array."""

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` to every position in [a, b] (1-indexed, inclusive)."""
        if self.flag[idx] is True:
            # push the pending lazy value down before descending
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over positions [a, b] (1-indexed, inclusive)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
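
# A quick sanity check (a sketch, on a freshly built tree before any update):
# a range-max query over [l, r] (1-indexed, inclusive) must agree with a
# brute-force max over the same slice of the input array.
#
#     fresh = SegmentTree(size)
#     fresh.build(1, 1, size, A)
#     assert fresh.query(1, 1, size, 4, 6) == max(A[3:6])  # both give 7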
| 585
| 1
|
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    # overwrite from common since TFViTMAEForPretraining has random masking, we need to fix the noise
    # to generate the same output for both the dict and keyword-argument call
    def test_keyword_and_dict_args(self):
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)

            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        np.random.seed(2)
        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
    def test_keras_save_load(self):
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }

        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
    @slow
    def test_save_load(self):
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            new_model = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
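
# A minimal inference sketch for the TF ViTMAE pre-training head (assumes
# network access to the `facebook/vit-mae-base` checkpoint; the `noise` keyword
# fixes the otherwise random patch mask so runs are reproducible):
#
#     processor = ViTImageProcessor.from_pretrained("facebook/vit-mae-base")
#     model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
#     inputs = processor(images=image, return_tensors="tf")
#     outputs = model(**inputs)  # outputs.logits: (1, num_patches, patch_size**2 * 3)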
| 523
|
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """Apply the logistic sigmoid 1 / (1 + e^-x) elementwise."""
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
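
# A quick worked example: sigmoid(0) = 1 / (1 + e^0) = 0.5, and the function
# vectorizes elementwise over arrays:
#
#     sigmoid(np.array([-1.0, 0.0, 1.0]))  # -> approx. [0.2689, 0.5, 0.7311]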
| 523
| 1
|
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
| 700
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Any = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[int] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
| 0
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
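
# A minimal denoising-loop sketch with the scheduler under test (assuming some
# `model(sample, t)` that predicts the noise residual; shapes are illustrative):
#
#     scheduler = DDPMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     sample = torch.randn(1, 3, 32, 32)
#     for t in scheduler.timesteps:
#         residual = model(sample, t)
#         sample = scheduler.step(residual, t, sample).prev_sample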
| 180
|
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))
    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)
    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
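
# A minimal usage sketch of the global-attention convention the tests above
# exercise (assumes network access to the `allenai/led-base-16384` checkpoint):
#
#     tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#     enc = tok(["Summary of the text.", "Another summary."], padding=False)
#     enc["global_attention_mask"] = [[0] * len(x) for x in enc["input_ids"]]
#     padded = tok.pad(enc)  # padded global_attention_mask positions become -1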
| 180
| 1
|
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 347
|
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, tempa = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, tempa

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def a_ ( __UpperCAmelCase ) -> Tuple:
"""simple docstring"""
snake_case: Union[str, Any] =Heap()
snake_case: Optional[int] =[0] * len(__UpperCAmelCase )
snake_case: Optional[Any] =[-1] * len(__UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
snake_case: List[str] =[] # Heap of Distance of vertices from their neighboring vertex
snake_case: Tuple =[]
for vertex in range(len(__UpperCAmelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(__UpperCAmelCase )
heap.node_position.append(__UpperCAmelCase )
snake_case: int =[]
snake_case: Union[str, Any] =1
snake_case: Union[str, Any] =sys.maxsize
for neighbor, distance in adjacency_list[0]:
snake_case: List[Any] =0
snake_case: Union[str, Any] =distance
heap.heapify(__UpperCAmelCase , __UpperCAmelCase )
for _ in range(1 , len(__UpperCAmelCase ) ):
snake_case: Optional[int] =heap.delete_minimum(__UpperCAmelCase , __UpperCAmelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
snake_case: Union[str, Any] =1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(__UpperCAmelCase )]
):
snake_case: List[Any] =distance
heap.bottom_to_top(
__UpperCAmelCase , heap.get_position(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase )
snake_case: str =vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
a = int(input('Enter number of edges: ').strip())
a = defaultdict(list)
for _ in range(edges_number):
a = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
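
# A quick sanity check with a hard-coded graph instead of stdin (assumed input
# format: adjacency_list[u] is a list of [v, weight] pairs). For this weighted
# triangle, the minimum spanning tree keeps the two unit-weight edges:
#
#     example = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 1]], 2: [[0, 3], [1, 1]]}
#     print(prisms_algorithm(example))  # [(0, 1), (1, 2)]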
| 347
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Copy/paste/tweak the X-MOD checkpoint weights into the transformers design."""
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
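
# Example invocation (the script filename and paths are placeholders):
#
#     python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#         --xmod_checkpoint_path /path/to/xmod/model.pt \
#         --pytorch_dump_folder_path ./xmod-base-hf \
#         --classification_head   # only when converting an MNLI head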
| 552
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer consistent with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
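
# A short usage sketch for the fast tokenizer defined above (checkpoint name
# taken from PRETRAINED_VOCAB_FILES_MAP; requires network access on first use):
#
#     tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#     enc = tok("first segment", "second segment")
#     # enc["token_type_ids"] is 0 over "[CLS] first [SEP]" and 1 over
#     # "second [SEP]", matching create_token_type_ids_from_sequences above.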
| 552
| 1
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class __A ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , A , A=7 , A=400 , A=2_000 , A=1 , A=0.0 , A=16_000 , A=True , A=True , ) -> Optional[Any]:
"""simple docstring"""
_a = parent
_a = batch_size
_a = min_seq_length
_a = max_seq_length
_a = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_a = feature_size
_a = padding_value
_a = sampling_rate
_a = return_attention_mask
_a = do_normalize
def a__ (self ) -> Dict:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a__ (self , A=False , A=False ) -> int:
"""simple docstring"""
def _flatten(A ):
return list(itertools.chain(*A ) )
if equal_length:
_a = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_a = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_a = [np.asarray(A ) for x in speech_inputs]
return speech_inputs
class __A ( A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : List[Any] = WavaVecaFeatureExtractor
def a__ (self ) -> List[Any]:
"""simple docstring"""
_a = WavaVecaFeatureExtractionTester(self )
def a__ (self , A ) -> Union[str, Any]:
"""simple docstring"""
self.assertTrue(np.all(np.mean(A , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(A , axis=0 ) - 1 ) < 1E-3 ) )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_a = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
_a = [np.asarray(A ) for speech_input in speech_inputs]
# Test not batched input
_a = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
_a = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(A , A , atol=1E-3 ) )
# Test batched
_a = feat_extract(A , return_tensors='''np''' ).input_values
_a = feat_extract(A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_a = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_a = np.asarray(A )
_a = feat_extract(A , return_tensors='''np''' ).input_values
_a = feat_extract(A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1E-3 ) )
def a__ (self ) -> Dict:
"""simple docstring"""
_a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_a = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
_a = ['''longest''', '''max_length''', '''do_not_pad''']
_a = [None, 1_600, None]
for max_length, padding in zip(A , A ):
_a = feat_extract(A , padding=A , max_length=A , return_tensors='''np''' )
_a = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self.assertTrue(input_values[0][1_000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def a__ (self ) -> List[Any]:
"""simple docstring"""
_a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_a = range(800 , 1_400 , 200 )
_a = [floats_list((1, x) )[0] for x in lengths]
_a = ['''longest''', '''max_length''', '''do_not_pad''']
_a = [None, 1_600, None]
for max_length, padding in zip(A , A ):
_a = feat_extract(A , max_length=A , padding=A )
_a = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def a__ (self ) -> Any:
"""simple docstring"""
_a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_a = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
_a = feat_extract(
A , truncation=A , max_length=1_000 , padding='''max_length''' , return_tensors='''np''' )
_a = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def a__ (self ) -> List[Any]:
"""simple docstring"""
_a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_a = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
_a = feat_extract(
A , truncation=A , max_length=1_000 , padding='''longest''' , return_tensors='''np''' )
_a = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
_a = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
_a = feat_extract(
A , truncation=A , max_length=2_000 , padding='''longest''' , return_tensors='''np''' )
_a = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
@require_torch
def a__ (self ) -> Tuple:
"""simple docstring"""
import torch
_a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_a = np.random.rand(100 ).astype(np.floataa )
_a = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_a = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_a = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def a__ (self ) -> Tuple:
"""simple docstring"""
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
_a = WavaVecaConfig.from_pretrained(A )
_a = WavaVecaFeatureExtractor.from_pretrained(A )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
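
# A minimal sketch of the padding behaviour the tests above exercise
# (constructor arguments mirror prepare_feat_extract_dict; values assumed):
#
#     fe = WavaVecaFeatureExtractor(feature_size=1, sampling_rate=16_000,
#                                   padding_value=0.0, do_normalize=True)
#     raw = [np.random.rand(800).astype(np.float32),
#            np.random.rand(1_200).astype(np.float32)]
#     batch = fe(raw, sampling_rate=16_000, padding="longest", return_tensors="np")
#     batch.input_values.shape  # (2, 1200): the shorter clip is zero-padded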
| 352
|
'''simple docstring'''
import numpy as np
import qiskit
def bbaa(key_len=8, seed=None):
    """Simulates the BB84 quantum key distribution protocol and returns the sifted key."""
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result
            )
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
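
# Rough sizing check (an informal sketch): with 6 * key_len qubits and a 1/2
# chance per qubit that Alice's and Bob's bases agree, the sifted key averages
# 3 * key_len bits, so the final step almost always truncates rather than pads.
#
#     key = bbaa(16, seed=0)
#     assert len(key) == 16 and set(key) <= {"0", "1"}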
| 352
| 1
|
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        # Binarize the logits, then scale to an 8-bit black-and-white mask.
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
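
# A usage sketch for the tool above (image path and label are placeholders;
# downloads the CIDAS/clipseg-rd64-refined checkpoint on first use):
#
#     tool = ImageSegmentationTool()
#     mask = tool(Image.open("photo.png"), "cat")  # returns a PIL image mask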
| 628
|
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = """<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR=\"FFFFFF\">
<HR>
<a href=\"http://google.com\">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style=\"color:#0000FF\">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>"""
    html_string_2 = """
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
"""
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
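
# A standalone usage sketch of the feature extractor under test (requires the
# bs4 backend; the HTML snippet and the shown outputs are illustrative):
#
#     fe = MarkupLMFeatureExtractor()
#     enc = fe("<html><body><h1>Title</h1><p>Some text.</p></body></html>")
#     enc.nodes   # e.g. [['Title', 'Some text.']]
#     enc.xpaths  # e.g. [['/html/body/h1', '/html/body/p']]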
| 379
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
a_ : List[str] = logging.get_logger(__name__)
a_ : Union[str, Any] = {'vocab_file': 'vocab.txt'}
a_ : str = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
a_ : List[str] = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
a_ : Tuple = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class SCREAMING_SNAKE_CASE_ ( _A ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_INIT_CONFIGURATION
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ConvBertTokenizer
def __init__( self , A=None , A=None , A=True , A="[UNK]" , A="[SEP]" , A="[PAD]" , A="[CLS]" , A="[MASK]" , A=True , A=None , **A , ) -> List[Any]:
'''simple docstring'''
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , tokenize_chinese_chars=UpperCamelCase__ , strip_accents=UpperCamelCase__ , **UpperCamelCase__ , )
__magic_name__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , UpperCamelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , UpperCamelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase__ ) != tokenize_chinese_chars
):
__magic_name__ = getattr(UpperCamelCase__ , normalizer_state.pop('''type''' ) )
__magic_name__ = do_lower_case
__magic_name__ = strip_accents
__magic_name__ = tokenize_chinese_chars
__magic_name__ = normalizer_class(**UpperCamelCase__ )
__magic_name__ = do_lower_case
def __A ( self , A , A=None ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self , A , A = None ) -> List[int]:
'''simple docstring'''
__magic_name__ = [self.sep_token_id]
__magic_name__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , A , A = None ) -> Tuple[str]:
'''simple docstring'''
__magic_name__ = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
| 710
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = scope
__magic_name__ = self.vocab_size - 1
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__magic_name__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __A ( self , A , A , A , A , *A ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTModel(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , head_mask=A )
__magic_name__ = model(A , token_type_ids=A )
__magic_name__ = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A , A , A , A , *A ) -> Dict:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> List[Any]:
'''simple docstring'''
__magic_name__ = OpenAIGPTDoubleHeadsModel(A )
model.to(A )
model.eval()
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , *A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = OpenAIGPTForSequenceClassification(A )
model.to(A )
model.eval()
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) = config_and_inputs
__magic_name__ = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_a = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_a = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_a = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def __A ( self , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __A ( self , A , A , A=False ) -> List[str]:
'''simple docstring'''
__magic_name__ = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=A , )
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = inputs_dict['''labels''']
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=A , )
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = OpenAIGPTModelTester(self )
__magic_name__ = ConfigTester(self , config_class=A , n_embd=37 )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*A )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*A )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = OpenAIGPTModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> Tuple:
'''simple docstring'''
__magic_name__ = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(A )
__magic_name__ = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=A ) # the president is
__magic_name__ = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__magic_name__ = model.generate(A , do_sample=A )
self.assertListEqual(output_ids[0].tolist() , A )
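
# The slow generation test above in miniature (greedy decoding, i.e.
# do_sample=False, with the same prompt as the hard-coded input_ids):
#
#     tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#     model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
#     ids = tokenizer("the president is", return_tensors="pt").input_ids
#     out = model.generate(ids, do_sample=False)
#     tokenizer.decode(out[0])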
| 678
| 0
|
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
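
# Typical end-user entry point for the builder above (the directory layout is
# a placeholder; subfolder names become the "label" column):
#
#     from datasets import load_dataset
#     ds = load_dataset("audiofolder", data_dir="/path/to/clips")
#     ds["train"][0]["audio"], ds["train"][0]["label"]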
| 597
|
'''simple docstring'''
def greatest_common_divisor(a, b):
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x, y):
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
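
# A few quick checks of the two implementations (signs are handled via abs()):
#
#     assert greatest_common_divisor(121, 11) == 11
#     assert gcd_by_iterative(24, -54) == 6
#     # For nonzero a, b the identity gcd(a, b) * lcm(a, b) == |a * b| holds.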
| 597
| 1
|
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowercase ( ):
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
mock = "__test_patch_submodule_mock__"
with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowercase ( ):
assert _test_patching.open is open
mock = "__test_patch_submodule_builtin_mock__"
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def lowercase ( ):
# pandas.read_csv is not present in _test_patching
mock = "__test_patch_submodule_missing_mock__"
with patch_submodule(_test_patching, "pandas.read_csv", mock):
pass
def lowercase ( ):
# builtin should always be mocked even if they're not in the globals
# in case they're loaded at one point
mock = "__test_patch_submodule_missing_builtin_mock__"
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching, "len", None) is None
with patch_submodule(_test_patching, "len", mock):
assert _test_patching.len is mock
assert _test_patching.len is len
def lowercase ( ):
mock = "__test_patch_submodule_start_and_stop_mock__"
patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowercase ( ):
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
mock_join = "__test_patch_submodule_successive_join__"
mock_dirname = "__test_patch_submodule_successive_dirname__"
mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching, "os.path.join", mock_join):
with patch_submodule(_test_patching, "os.rename", mock_rename):
with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching, "os.rename", mock_rename):
with patch_submodule(_test_patching, "os.path.join", mock_join):
with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowercase ( ):
mock = "__test_patch_submodule_doesnt_exist_mock__"
with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
pass
with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
pass
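
# The API under test in one line of intent: patch_submodule temporarily swaps
# an attribute reachable from a module, then restores it (mock is a sentinel):
#
#     with patch_submodule(_test_patching, "os.path.join", mock):
#         assert _test_patching.os.path.join is mock
#     assert _test_patching.os.path.join is not mock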
| 704
|
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _UpperCAmelCase :
def __init__( self : Tuple , A : Union[str, Any] , A : Optional[int]=13 , A : Dict=30 , A : List[Any]=2 , A : List[Any]=3 , A : Tuple=True , A : Dict=True , A : Union[str, Any]=32 , A : Optional[int]=5 , A : Tuple=4 , A : Any=37 , A : Dict="gelu" , A : Optional[Any]=0.1 , A : Union[str, Any]=0.1 , A : Any=10 , A : Dict=0.02 , A : Any=3 , A : str=None , A : Dict=2 , ) -> Optional[int]:
lowercase_ : str = parent
lowercase_ : Optional[int] = batch_size
lowercase_ : int = image_size
lowercase_ : str = patch_size
lowercase_ : Dict = num_channels
lowercase_ : Optional[Any] = is_training
lowercase_ : str = use_labels
lowercase_ : Tuple = hidden_size
lowercase_ : int = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : Dict = intermediate_size
lowercase_ : Dict = hidden_act
lowercase_ : Union[str, Any] = hidden_dropout_prob
lowercase_ : Tuple = attention_probs_dropout_prob
lowercase_ : List[str] = type_sequence_label_size
lowercase_ : Any = initializer_range
lowercase_ : Optional[int] = scope
lowercase_ : List[str] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
lowercase_ : Optional[Any] = (image_size // patch_size) ** 2
lowercase_ : List[Any] = num_patches + 2
def A ( self : Dict ) -> str:
lowercase_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : Tuple = None
if self.use_labels:
lowercase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : int = self.get_config()
return config, pixel_values, labels
def A ( self : Tuple ) -> str:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A ( self : Optional[Any] , A : Optional[int] , A : Any , A : Any ) -> int:
lowercase_ : Any = DeiTModel(config=A )
model.to(A )
model.eval()
lowercase_ : List[str] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Union[str, Any] , A : int , A : Optional[int] , A : Optional[Any] ) -> str:
lowercase_ : int = DeiTForMaskedImageModeling(config=A )
model.to(A )
model.eval()
lowercase_ : int = model(A )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ : int = 1
lowercase_ : Any = DeiTForMaskedImageModeling(A )
model.to(A )
model.eval()
lowercase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ : Dict = model(A )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A ( self : int , A : str , A : Optional[int] , A : List[str] ) -> List[Any]:
lowercase_ : str = self.type_sequence_label_size
lowercase_ : Tuple = DeiTForImageClassification(A )
model.to(A )
model.eval()
lowercase_ : int = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ : Dict = 1
lowercase_ : Optional[Any] = DeiTForImageClassification(A )
model.to(A )
model.eval()
lowercase_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ : Dict = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A ( self : Dict ) -> Optional[Any]:
lowercase_ : List[str] = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) : Union[str, Any] = config_and_inputs
lowercase_ : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _A , _A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Dict = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : Dict = False
SCREAMING_SNAKE_CASE_ : List[str] = False
def A ( self : Optional[Any] ) -> Dict:
lowercase_ : List[Any] = DeiTModelTester(self )
lowercase_ : int = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 )
def A ( self : Optional[int] ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def A ( self : int ) -> List[Any]:
pass
def A ( self : Any ) -> Tuple:
lowercase_ , lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : str = model_class(A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , nn.Linear ) )
def A ( self : Tuple ) -> Optional[int]:
lowercase_ , lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = model_class(A )
lowercase_ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Any = [*signature.parameters.keys()]
lowercase_ : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A )
def A ( self : Tuple ) -> Optional[int]:
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def A ( self : Dict ) -> List[str]:
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A )
def A ( self : int ) -> Union[str, Any]:
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def A ( self : Tuple , A : Optional[int] , A : int , A : Optional[Any]=False ) -> int:
lowercase_ : Dict = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def A ( self : int ) -> str:
if not self.model_tester.is_training:
return
lowercase_ , lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : str = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(A )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
lowercase_ : Any = model_class(A )
model.to(A )
model.train()
lowercase_ : Any = self._prepare_for_class(A , A , return_labels=A )
lowercase_ : Optional[Any] = model(**A ).loss
loss.backward()
def A ( self : Dict ) -> Union[str, Any]:
lowercase_ , lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase_ : Union[str, Any] = False
lowercase_ : List[str] = True
for model_class in self.all_model_classes:
if model_class in get_values(A ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
lowercase_ : Optional[Any] = model_class(A )
model.gradient_checkpointing_enable()
model.to(A )
model.train()
lowercase_ : str = self._prepare_for_class(A , A , return_labels=A )
lowercase_ : Optional[Any] = model(**A ).loss
loss.backward()
def A ( self : Tuple ) -> Any:
lowercase_ , lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[Any] = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(A ),
*get_values(A ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ):
lowercase_ : int = problem_type['''title''']
lowercase_ : int = problem_type['''num_labels''']
lowercase_ : Dict = model_class(A )
model.to(A )
model.train()
lowercase_ : Tuple = self._prepare_for_class(A , A , return_labels=A )
if problem_type["num_labels"] > 1:
lowercase_ : Any = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
lowercase_ : Dict = inputs['''labels'''].to(problem_type['''dtype'''] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=A ) as warning_list:
lowercase_ : str = model(**A ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def A ( self : Union[str, Any] ) -> Union[str, Any]:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[str] = DeiTModel.from_pretrained(A )
self.assertIsNotNone(A )
def lowercase ( ):
lowercase_ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def A ( self : Optional[Any] ) -> Dict:
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def A ( self : Optional[Any] ) -> Dict:
lowercase_ : Dict = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to(
A )
lowercase_ : Optional[Any] = self.default_image_processor
lowercase_ : Tuple = prepare_img()
lowercase_ : Dict = image_processor(images=A , return_tensors='''pt''' ).to(A )
# forward pass
with torch.no_grad():
lowercase_ : Any = model(**A )
# verify the logits
lowercase_ : Tuple = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , A )
lowercase_ : Optional[Any] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def A ( self : Union[str, Any] ) -> Tuple:
lowercase_ : int = DeiTModel.from_pretrained(
'''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''' )
lowercase_ : Any = self.default_image_processor
lowercase_ : Union[str, Any] = prepare_img()
lowercase_ : List[Any] = image_processor(images=A , return_tensors='''pt''' )
lowercase_ : Any = inputs.pixel_values.to(A )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase_ : List[Any] = model(A )
| 141
| 0
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def A ( __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
A__ = flax_key_tuple[:-1] + ('weight',)
A__ = torch.permute(__UpperCamelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__UpperCamelCase ):
# linear layer
A__ = flax_key_tuple[:-1] + ('weight',)
A__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
A__ = flax_key_tuple[:-1] + ('weight',)
return flax_key_tuple, flax_tensor
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
if "metadata" in layer:
A__ = layer.split('metadata' )
A__ = ''.join(split_layer[0] )[:-1]
A__ = [tuple(('metadata' + split_layer[1]).split('/' ) )]
elif "kvstore" in layer:
A__ = layer.split('kvstore' )
A__ = ''.join(split_layer[0] )[:-1]
A__ = [tuple(('kvstore' + split_layer[1]).split('/' ) )]
else:
A__ = layer.split('/' )
A__ = '/'.join(split_layer[:-1] )
A__ = (split_layer[-1],)
if "kvstore/path" in layer:
A__ = f'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
A__ = 'file'
else:
A__ = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def A ( __UpperCamelCase , __UpperCamelCase ) -> Dict:
A__ = rename_keys(__UpperCamelCase )
A__ = {}
for k, v in current_block.items():
A__ = v
A__ = new_current_block
torch.save(__UpperCamelCase , __UpperCamelCase )
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = WEIGHTS_NAME ) -> List[str]:
A__ = convert_file_size_to_int(__UpperCamelCase )
A__ = []
A__ = {}
A__ = 0
A__ = 0
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp:
A__ = serialization.msgpack_restore(fp.read() )['optimizer']['target']
A__ = flatten_dict(__UpperCamelCase , sep='/' )
A__ = {}
for layer in checkpoint_info.keys():
A__ , A__ , A__ = get_key_and_tensorstore_dict(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if curr_real_layer_name in all_layers:
A__ = content
else:
A__ = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
A__ = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
A__ = torch.tensor(__UpperCamelCase )
A__ = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
A__ , A__ = rename_base_flax_keys(tuple(key.split('/' ) ) , __UpperCamelCase )
A__ = '/'.join(__UpperCamelCase )
        # If adding this weight would tip the current shard over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
A__ = os.path.join(
__UpperCamelCase , weights_name.replace('.bin' , f'''-{len(__UpperCamelCase )+1:05d}-of-???.bin''' ) )
rename_and_save_block(__UpperCamelCase , __UpperCamelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
A__ = {}
A__ = 0
A__ = raw_weights.to(getattr(__UpperCamelCase , __UpperCamelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
A__ = os.path.join(__UpperCamelCase , weights_name.replace('.bin' , f'''-{len(__UpperCamelCase )+1:05d}-of-???.bin''' ) )
rename_and_save_block(__UpperCamelCase , __UpperCamelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(__UpperCamelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
A__ = {}
A__ = {}
for idx, shard in enumerate(__UpperCamelCase ):
A__ = weights_name.replace(
            '.bin' , f'''-{idx+1:05d}-of-{len(__UpperCamelCase ):05d}.bin''' )
A__ = os.path.join(__UpperCamelCase , weights_name.replace('.bin' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__UpperCamelCase , os.path.join(__UpperCamelCase , __UpperCamelCase ) )
A__ = shard
for key in shard:
A__ = shard_file
# Add the metadata
A__ = {'total_size': total_size}
A__ = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(__UpperCamelCase , __UpperCamelCase ) , 'w' , encoding='utf-8' ) as f:
A__ = json.dumps(__UpperCamelCase , indent=2 , sort_keys=__UpperCamelCase ) + '\n'
f.write(__UpperCamelCase )
return metadata, index
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A ( ) -> Optional[Any]:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
A__ = SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
A__ = SwitchTransformersForConditionalGeneration.from_pretrained(
'/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' )
A__ = TaTokenizer.from_pretrained('t5-small' )
    A__ = 'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
A__ = tokenizer(__UpperCamelCase , return_tensors='pt' ).input_ids
A__ = model.generate(__UpperCamelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 9
|
from __future__ import annotations
from fractions import Fraction
def A ( __UpperCamelCase , __UpperCamelCase ) -> bool:
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def A ( __UpperCamelCase ) -> list[str]:
A__ = []
A__ = 11
A__ = int('1' + '0' * digit_len )
for num in range(__UpperCamelCase , __UpperCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(__UpperCamelCase , __UpperCamelCase ):
solutions.append(f'''{num}/{den}''' )
den += 1
num += 1
A__ = 10
return solutions
def A ( __UpperCamelCase = 2 ) -> int:
A__ = 1.0
for fraction in fraction_list(__UpperCamelCase ):
A__ = Fraction(__UpperCamelCase )
result *= frac.denominator / frac.numerator
    return int(result )
if __name__ == "__main__":
print(solution())
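# ---------------------------------------------------------------------------
# Hedged sketch (added for illustration, not part of the original file): the
# digit-cancelling test above rewritten with descriptive names, plus a worked
# check. 49/98 is the classic curious fraction: "cancelling" the shared digit
# 9 gives 4/8, which really does equal 49/98.
def _is_digit_cancelling_demo(num: int, den: int) -> bool:
    return num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den

assert _is_digit_cancelling_demo(49, 98)      # 49/98 == 4/8
assert not _is_digit_cancelling_demo(12, 34)  # no digit cancels
# ---------------------------------------------------------------------------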
| 9
| 1
|
'''simple docstring'''
def __snake_case (__UpperCAmelCase ):
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''' )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
lowerCamelCase_ : List[Any] = grid[0]
for row_n in range(1 , len(__UpperCAmelCase ) ):
lowerCamelCase_ : Dict = grid[row_n]
lowerCamelCase_ : Optional[int] = fill_row(__UpperCAmelCase , __UpperCAmelCase )
lowerCamelCase_ : Dict = grid[row_n]
return grid[-1][-1]
def __snake_case (__UpperCAmelCase , __UpperCAmelCase ):
current_row[0] += row_above[0]
for cell_n in range(1 , len(__UpperCAmelCase ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
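# ---------------------------------------------------------------------------
# Hedged sketch (added for illustration, not part of the original file): the
# same row-by-row dynamic programming as above, with descriptive names. Each
# cell accumulates the cheapest cost of reaching it from the top-left corner
# when only right/down moves are allowed.
def _min_path_sum_demo(grid: list[list[int]]) -> int:
    for c in range(1, len(grid[0])):
        grid[0][c] += grid[0][c - 1]
    for r in range(1, len(grid)):
        grid[r][0] += grid[r - 1][0]
        for c in range(1, len(grid[r])):
            grid[r][c] += min(grid[r][c - 1], grid[r - 1][c])
    return grid[-1][-1]

assert _min_path_sum_demo([[1, 3], [2, 4]]) == 7  # cheapest path: 1 -> 2 -> 4
# ---------------------------------------------------------------------------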
| 718
|
'''simple docstring'''
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
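# Hedged usage sketch (added for illustration, not part of the original file):
#     " ".join("I hate python".split()[::-1])  ->  "python hate I"
assert " ".join("I hate python".split()[::-1]) == "python hate I"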
| 418
| 0
|
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def lowerCAmelCase_ ( snake_case_ : int = 1_00 ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 1
UpperCAmelCase_ = 2
for i in range(2 , max_n + 1 ):
UpperCAmelCase_ = pre_numerator
UpperCAmelCase_ = 2 * i // 3 if i % 3 == 0 else 1
UpperCAmelCase_ = cur_numerator
UpperCAmelCase_ = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
if __name__ == "__main__":
print(f"{solution() = }")
| 78
|
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_: Tuple =False, False, False
@dataclass
class __A :
a__ : Optional[int] = None
a__ : bool = True
a__ : bool = True
a__ : Optional[str] = None
# Automatically constructed
a__ : ClassVar[str] = "dict"
a__ : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
a__ : str = field(default="""Audio""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def __call__(self : Optional[Any] ):
return self.pa_type
def _lowercase (self : str , __a : Union[str, bytes, dict] ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(__a , __a ):
return {"bytes": None, "path": value}
elif isinstance(__a , __a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase_ = BytesIO()
sf.write(__a , value["array"] , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
                    # Converting "PCM-byte" data to "WAV-byte" data requires knowing the sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
                    # If we already have the PCM bytes, we don't have to read the file again (just use them!)
UpperCAmelCase_ = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 32767
else:
UpperCAmelCase_ = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 32767
UpperCAmelCase_ = BytesIO(bytes() )
sf.write(__a , __a , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def _lowercase (self : Dict , __a : dict , __a : Optional[Dict[str, Union[str, bool, None]]] = None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
UpperCAmelCase_ , UpperCAmelCase_ = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
UpperCAmelCase_ = xsplitext(__a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
UpperCAmelCase_ = token_per_repo_id or {}
UpperCAmelCase_ = path.split("::" )[-1]
try:
UpperCAmelCase_ = string_to_dict(__a , config.HUB_DATASETS_URL )["repo_id"]
UpperCAmelCase_ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase_ = None
with xopen(__a , "rb" , use_auth_token=__a ) as f:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a )
else:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a )
UpperCAmelCase_ = array.T
if self.mono:
UpperCAmelCase_ = librosa.to_mono(__a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase_ = librosa.resample(__a , orig_sr=__a , target_sr=self.sampling_rate )
UpperCAmelCase_ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _lowercase (self : Dict ):
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def _lowercase (self : Optional[Any] , __a : Union[pa.StringArray, pa.StructArray] ):
if pa.types.is_string(storage.type ):
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() )
UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
UpperCAmelCase_ = pa.array([Audio().encode_example(__a ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCAmelCase_ = storage.field("bytes" )
else:
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCAmelCase_ = storage.field("path" )
else:
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
return array_cast(__a , self.pa_type )
def _lowercase (self : Dict , __a : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(__a : Tuple ):
with xopen(__a , "rb" ) as f:
UpperCAmelCase_ = f.read()
return bytes_
UpperCAmelCase_ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase_ = pa.array(
[os.path.basename(__a ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(__a , self.pa_type )
| 78
| 1
|
import numpy as np
from PIL import Image
def A_ ( lowercase_ , lowercase_ , lowercase_ ) ->np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE = np.array(lowercase_ )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
# compute the shape of the output matrix
SCREAMING_SNAKE_CASE = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
SCREAMING_SNAKE_CASE = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
SCREAMING_SNAKE_CASE = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
return updated_arr
def A_ ( lowercase_ , lowercase_ , lowercase_ ) ->np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE = np.array(lowercase_ )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
# compute the shape of the output matrix
SCREAMING_SNAKE_CASE = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
SCREAMING_SNAKE_CASE = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
SCREAMING_SNAKE_CASE = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
# Loading the image
__UpperCAmelCase = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
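# ---------------------------------------------------------------------------
# Hedged sketch (added for illustration, not part of the original file): what
# 2x2 max pooling with stride 2 produces on a small array, computed directly
# with numpy reshaping as a cross-check of the sliding-window loops above.
import numpy as np

_demo = np.arange(16).reshape(4, 4)                    # rows 0..3, 4..7, 8..11, 12..15
_pooled = _demo.reshape(2, 2, 2, 2).max(axis=(1, 3))   # max over each 2x2 block
assert (_pooled == np.array([[5, 7], [13, 15]])).all()
# ---------------------------------------------------------------------------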
| 259
|
import argparse
from collections import defaultdict
import yaml
__UpperCAmelCase = "docs/source/en/_toctree.yml"
def A_ ( lowercase_ ) ->Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = defaultdict(lowercase_ )
for doc in model_doc:
counts[doc["local"]] += 1
SCREAMING_SNAKE_CASE = [key for key, value in counts.items() if value > 1]
SCREAMING_SNAKE_CASE = []
for duplicate_key in duplicates:
SCREAMING_SNAKE_CASE = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(lowercase_ ) > 1:
raise ValueError(
f'''{duplicate_key} is present several times in the documentation table of content at '''
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
return sorted(lowercase_ , key=lambda lowercase_ : s["title"].lower() )
def A_ ( lowercase_=False ) ->List[Any]:
"""simple docstring"""
with open(lowercase_ , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE = yaml.safe_load(f.read() )
# Get to the API doc
SCREAMING_SNAKE_CASE = 0
while content[api_idx]["title"] != "API":
api_idx += 1
SCREAMING_SNAKE_CASE = content[api_idx]['sections']
# Then to the model doc
SCREAMING_SNAKE_CASE = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
SCREAMING_SNAKE_CASE = api_doc[model_idx]['sections']
SCREAMING_SNAKE_CASE = [(idx, section) for idx, section in enumerate(lowercase_ ) if 'sections' in section]
SCREAMING_SNAKE_CASE = False
for idx, modality_doc in modalities_docs:
SCREAMING_SNAKE_CASE = modality_doc['sections']
SCREAMING_SNAKE_CASE = clean_model_doc_toc(lowercase_ )
if old_modality_doc != new_modality_doc:
SCREAMING_SNAKE_CASE = True
if overwrite:
SCREAMING_SNAKE_CASE = new_modality_doc
if diff:
if overwrite:
SCREAMING_SNAKE_CASE = model_doc
SCREAMING_SNAKE_CASE = api_doc
with open(lowercase_ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(lowercase_ , allow_unicode=lowercase_ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__UpperCAmelCase = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 259
| 1
|
from bisect import bisect
from itertools import accumulate
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Any = sorted(zip(__lowerCAmelCase , __lowerCAmelCase ) , key=lambda __lowerCAmelCase : x[0] / x[1] , reverse=__lowerCAmelCase )
_snake_case , _snake_case : Optional[Any] = [i[0] for i in r], [i[1] for i in r]
_snake_case : Optional[Any] = list(accumulate(__lowerCAmelCase ) )
_snake_case : List[Any] = bisect(__lowerCAmelCase , __lowerCAmelCase )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
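# ---------------------------------------------------------------------------
# Hedged sketch (added for illustration, not part of the original file): the
# same greedy fractional knapsack with descriptive names, checked against the
# textbook instance. Items are taken in decreasing value/weight order; the
# first two fit whole (60 + 100) and 20 of the last item's 30 units fit,
# contributing 120 * 20 / 30 = 80, for a total of 240.
from bisect import bisect
from itertools import accumulate

def _frac_knapsack_demo(values, weights, capacity):
    order = sorted(zip(values, weights), key=lambda p: p[0] / p[1], reverse=True)
    values, weights = [p[0] for p in order], [p[1] for p in order]
    acc = list(accumulate(weights))
    k = bisect(acc, capacity)
    if k == 0:
        return 0.0
    if k == len(values):
        return float(sum(values))
    return sum(values[:k]) + (capacity - acc[k - 1]) * values[k] / weights[k]

assert _frac_knapsack_demo([60, 100, 120], [10, 20, 30], 50) == 240.0
# ---------------------------------------------------------------------------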
| 304
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : Optional[Any] = (DEISMultistepScheduler,)
_UpperCamelCase : Union[str, Any] = (("num_inference_steps", 25),)
def __UpperCAmelCase ( self : Dict , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
_snake_case : Any = {
'num_train_timesteps': 10_00,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
}
config.update(**lowerCamelCase_ )
return config
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : Optional[Any]=0 , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
_snake_case : Dict = dict(self.forward_default_kwargs )
_snake_case : Tuple = kwargs.pop('num_inference_steps' , lowerCamelCase_ )
_snake_case : List[Any] = self.dummy_sample
_snake_case : Optional[int] = 0.1 * sample
_snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_snake_case : Optional[Any] = self.get_scheduler_config(**lowerCamelCase_ )
_snake_case : Tuple = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals
_snake_case : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase_ )
_snake_case : Optional[Any] = scheduler_class.from_pretrained(lowerCamelCase_ )
new_scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals
_snake_case : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
_snake_case , _snake_case : Optional[Any] = sample, sample
for t in range(lowerCamelCase_ , time_step + scheduler.config.solver_order + 1 ):
_snake_case : Optional[int] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
_snake_case : Any = new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self : int , lowerCamelCase_ : int=0 , **lowerCamelCase_ : Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = dict(self.forward_default_kwargs )
_snake_case : Optional[int] = kwargs.pop('num_inference_steps' , lowerCamelCase_ )
_snake_case : Any = self.dummy_sample
_snake_case : Union[str, Any] = 0.1 * sample
_snake_case : int = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_snake_case : Any = self.get_scheduler_config()
_snake_case : Union[str, Any] = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals (must be after setting timesteps)
_snake_case : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase_ )
_snake_case : List[Any] = scheduler_class.from_pretrained(lowerCamelCase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residual (must be after setting timesteps)
_snake_case : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_snake_case : Tuple = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
_snake_case : Tuple = new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : List[Any]=None , **lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
if scheduler is None:
_snake_case : Dict = self.scheduler_classes[0]
_snake_case : int = self.get_scheduler_config(**lowerCamelCase_ )
_snake_case : Dict = scheduler_class(**lowerCamelCase_ )
_snake_case : Tuple = self.scheduler_classes[0]
_snake_case : Any = self.get_scheduler_config(**lowerCamelCase_ )
_snake_case : Tuple = scheduler_class(**lowerCamelCase_ )
_snake_case : str = 10
_snake_case : Union[str, Any] = self.dummy_model()
_snake_case : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Dict = model(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
return sample
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = dict(self.forward_default_kwargs )
_snake_case : Any = kwargs.pop('num_inference_steps' , lowerCamelCase_ )
for scheduler_class in self.scheduler_classes:
_snake_case : List[str] = self.get_scheduler_config()
_snake_case : int = scheduler_class(**lowerCamelCase_ )
_snake_case : Union[str, Any] = self.dummy_sample
_snake_case : Union[str, Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase_ , 'set_timesteps' ):
scheduler.set_timesteps(lowerCamelCase_ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase_ , 'set_timesteps' ):
_snake_case : Optional[int] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_snake_case : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
_snake_case : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
_snake_case : Any = scheduler.timesteps[5]
_snake_case : List[str] = scheduler.timesteps[6]
_snake_case : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
_snake_case : Tuple = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Optional[int] = DEISMultistepScheduler(**self.get_scheduler_config() )
_snake_case : List[str] = self.full_loop(scheduler=lowerCamelCase_ )
_snake_case : int = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1e-3
_snake_case : List[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_snake_case : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
_snake_case : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
_snake_case : Optional[Any] = DEISMultistepScheduler.from_config(scheduler.config )
_snake_case : Dict = self.full_loop(scheduler=lowerCamelCase_ )
_snake_case : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1e-3
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
self.check_over_configs(thresholding=lowerCamelCase_ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCamelCase_ , prediction_type=lowerCamelCase_ , sample_max_value=lowerCamelCase_ , algorithm_type='deis' , solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , prediction_type=lowerCamelCase_ , algorithm_type=lowerCamelCase_ , )
_snake_case : str = self.full_loop(
solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , prediction_type=lowerCamelCase_ , algorithm_type=lowerCamelCase_ , )
assert not torch.isnan(lowerCamelCase_ ).any(), "Samples have nan numbers"
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
self.check_over_configs(lower_order_final=lowerCamelCase_ )
self.check_over_configs(lower_order_final=lowerCamelCase_ )
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=lowerCamelCase_ , time_step=0 )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : str = self.full_loop()
_snake_case : List[Any] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1e-3
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
_snake_case : Optional[Any] = self.full_loop(prediction_type='v_prediction' )
_snake_case : List[str] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case : str = self.scheduler_classes[0]
_snake_case : Dict = self.get_scheduler_config(thresholding=lowerCamelCase_ , dynamic_thresholding_ratio=0 )
_snake_case : Optional[int] = scheduler_class(**lowerCamelCase_ )
_snake_case : str = 10
_snake_case : Tuple = self.dummy_model()
_snake_case : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Any = model(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Optional[int] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
assert sample.dtype == torch.floataa
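# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original file;
# assumes a current diffusers install): instantiating the scheduler under test
# and running a short denoising loop on random data, mirroring what the
# full-loop helper above does with the dummy model.
import torch
from diffusers import DEISMultistepScheduler

scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.randn(1, 3, 8, 8)  # stand-in for a real denoising model
    sample = scheduler.step(model_output, t, sample).prev_sample
# ---------------------------------------------------------------------------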
| 304
| 1
|
def A_( A , A ):
_validate_point(A )
_validate_point(A )
if len(A ) != len(A ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(A , A ) ) )
def A_( A ):
if point:
if isinstance(A , A ):
for item in point:
if not isinstance(A , (int, float) ):
UpperCAmelCase_ = (
"""Expected a list of numbers as input, found """
f"""{type(A ).__name__}"""
)
raise TypeError(A )
else:
UpperCAmelCase_ = f"""Expected a list of numbers as input, found {type(A ).__name__}"""
raise TypeError(A )
else:
raise ValueError("""Missing an input""" )
def A_( A , A ):
_validate_point(A )
_validate_point(A )
if len(A ) != len(A ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(A , A ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
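# Hedged usage sketch (added for illustration, not part of the original file):
# the Manhattan distance between n-dimensional points is the sum of the
# coordinate-wise absolute differences, e.g. |1-2| + |1-2| + |1-2| = 3.
assert sum(abs(a - b) for a, b in zip([1, 1, 1], [2, 2, 2])) == 3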
| 486
|
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def A_( ):
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--model_ckpt""" , type=A , default="""microsoft/unixcoder-base-nine""" )
parser.add_argument("""--num_epochs""" , type=A , default=5 )
parser.add_argument("""--batch_size""" , type=A , default=6 )
parser.add_argument("""--gradient_accumulation_steps""" , type=A , default=1 )
parser.add_argument("""--freeze""" , type=A , default=A )
parser.add_argument("""--learning_rate""" , type=A , default=5E-4 )
parser.add_argument("""--seed""" , type=A , default=0 )
parser.add_argument("""--lr_scheduler_type""" , type=A , default="""cosine""" )
parser.add_argument("""--num_warmup_steps""" , type=A , default=10 )
parser.add_argument("""--weight_decay""" , type=A , default=0.01 )
parser.add_argument("""--output_dir""" , type=A , default="""./results""" )
return parser.parse_args()
UpperCamelCase__ : Any = load("""accuracy""")
def A_( A ):
UpperCAmelCase_ , UpperCAmelCase_ = eval_pred
UpperCAmelCase_ = np.argmax(A , axis=1 )
return metric.compute(predictions=A , references=A )
class _UpperCamelCase ( A_ ):
'''simple docstring'''
def __init__( self : str , __lowercase : Any ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ = trainer
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowercase : int , __lowercase : Tuple , __lowercase : Dict , **__lowercase : Dict ):
'''simple docstring'''
if control.should_evaluate:
UpperCAmelCase_ = deepcopy(__lowercase )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="""train""" )
return control_copy
def A_( ):
UpperCAmelCase_ = get_args()
set_seed(args.seed )
UpperCAmelCase_ = load_dataset("""codeparrot/codecomplex""" , split="""train""" )
UpperCAmelCase_ = dataset.train_test_split(test_size=0.2 )
UpperCAmelCase_ = train_test["""test"""].train_test_split(test_size=0.5 )
UpperCAmelCase_ = DatasetDict(
{
"""train""": train_test["""train"""],
"""test""": test_validation["""train"""],
"""valid""": test_validation["""test"""],
} )
print("""Loading tokenizer and model""" )
UpperCAmelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt )
UpperCAmelCase_ = tokenizer.eos_token
UpperCAmelCase_ = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
UpperCAmelCase_ = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
UpperCAmelCase_ = False
UpperCAmelCase_ = ClassLabel(num_classes=7 , names=list(set(train_test_validation["""train"""]["""complexity"""] ) ) )
def tokenize(A ):
UpperCAmelCase_ = tokenizer(example["""src"""] , truncation=A , max_length=1024 )
UpperCAmelCase_ = labels.straint(example["""complexity"""] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
UpperCAmelCase_ = train_test_validation.map(
A , batched=A , remove_columns=train_test_validation["""train"""].column_names , )
UpperCAmelCase_ = DataCollatorWithPadding(tokenizer=A )
UpperCAmelCase_ = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="""epoch""" , save_strategy="""epoch""" , logging_strategy="""epoch""" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="""accuracy""" , run_name="""complexity-java""" , report_to="""wandb""" , )
UpperCAmelCase_ = Trainer(
model=A , args=A , train_dataset=tokenized_datasets["""train"""] , eval_dataset=tokenized_datasets["""valid"""] , tokenizer=A , data_collator=A , compute_metrics=A , )
print("""Training...""" )
trainer.add_callback(CustomCallback(A ) )
trainer.train()
if __name__ == "__main__":
main()
| 486
| 1
|
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class a__ :
def __init__( self , _a , _a=sys.maxsize ):
lowercase : Optional[int] = "bilinear"
lowercase : Optional[int] = max_size
lowercase : List[str] = short_edge_length
def __call__( self , _a ):
lowercase : Union[str, Any] = []
for img in imgs:
lowercase , lowercase : Tuple = img.shape[:2]
# later: provide list and randomly choose index for resize
lowercase : Dict = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
lowercase : List[Any] = size * 1.0 / min(UpperCamelCase__ , UpperCamelCase__ )
if h < w:
lowercase , lowercase : Union[str, Any] = size, scale * w
else:
lowercase , lowercase : str = scale * h, size
if max(UpperCamelCase__ , UpperCamelCase__ ) > self.max_size:
lowercase : Dict = self.max_size * 1.0 / max(UpperCamelCase__ , UpperCamelCase__ )
lowercase : Dict = newh * scale
lowercase : Any = neww * scale
lowercase : List[str] = int(neww + 0.5 )
lowercase : Tuple = int(newh + 0.5 )
if img.dtype == np.uinta:
lowercase : Any = Image.fromarray(UpperCamelCase__ )
lowercase : str = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
lowercase : Optional[int] = np.asarray(UpperCamelCase__ )
else:
                lowercase : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # hwc -> nchw
lowercase : int = nn.functional.interpolate(
UpperCamelCase__ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase__ ).squeeze(0 )
img_augs.append(UpperCamelCase__ )
return img_augs
class a__ :
def __init__( self , _a ):
lowercase : int = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
lowercase : Optional[int] = cfg.INPUT.FORMAT
lowercase : Union[str, Any] = cfg.SIZE_DIVISIBILITY
lowercase : str = cfg.PAD_VALUE
lowercase : str = cfg.INPUT.MAX_SIZE_TEST
lowercase : str = cfg.MODEL.DEVICE
lowercase : Any = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowercase : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowercase : int = lambda _a : (x - self.pixel_mean) / self.pixel_std
def __magic_name__ ( self , _a ):
lowercase : str = tuple(max(UpperCamelCase__ ) for s in zip(*[img.shape for img in images] ) )
lowercase : Any = [im.shape[-2:] for im in images]
lowercase : Tuple = [
nn.functional.pad(
UpperCamelCase__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(UpperCamelCase__ , UpperCamelCase__ )
]
return torch.stack(UpperCamelCase__ ), torch.tensor(UpperCamelCase__ )
def __call__( self , _a , _a=False ):
with torch.no_grad():
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowercase : List[Any] = [images]
if single_image:
assert len(UpperCamelCase__ ) == 1
for i in range(len(UpperCamelCase__ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(UpperCamelCase__ , images.pop(UpperCamelCase__ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
UpperCamelCase__ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase__ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
lowercase : List[str] = torch.tensor([im.shape[:2] for im in images] )
lowercase : List[str] = self.aug(UpperCamelCase__ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
lowercase : Union[str, Any] = [self.normalizer(UpperCamelCase__ ) for x in images]
# now pad them to do the following operations
lowercase , lowercase : int = self.pad(UpperCamelCase__ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
lowercase : Tuple = torch.true_divide(UpperCamelCase__ , UpperCamelCase__ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def __magic_name__ ( __snake_case : Optional[Any] , __snake_case : Any ) -> Optional[Any]:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def __magic_name__ ( __snake_case : Any , __snake_case : Tuple[int, int] ) -> List[Any]:
assert torch.isfinite(__snake_case ).all(), "Box tensor contains infinite or NaN!"
lowercase , lowercase : Tuple = box_size
tensor[:, 0].clamp_(min=0 , max=__snake_case )
tensor[:, 1].clamp_(min=0 , max=__snake_case )
tensor[:, 2].clamp_(min=0 , max=__snake_case )
tensor[:, 3].clamp_(min=0 , max=__snake_case )
| 361
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : Dict = '''wavlm'''
def __init__( self : Tuple , UpperCamelCase__ : str=3_2 , UpperCamelCase__ : Any=7_6_8 , UpperCamelCase__ : Any=1_2 , UpperCamelCase__ : Tuple=1_2 , UpperCamelCase__ : str=3_0_7_2 , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Optional[int]=1E-5 , UpperCamelCase__ : Any="group" , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCamelCase__ : List[str]=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase__ : Dict=(1_0, 3, 3, 3, 3, 2, 2) , UpperCamelCase__ : int=False , UpperCamelCase__ : Optional[Any]=1_2_8 , UpperCamelCase__ : Optional[int]=1_6 , UpperCamelCase__ : Optional[Any]=3_2_0 , UpperCamelCase__ : Any=8_0_0 , UpperCamelCase__ : Any=False , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Optional[Any]=0.05 , UpperCamelCase__ : Optional[Any]=1_0 , UpperCamelCase__ : Union[str, Any]=2 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Tuple=1_0 , UpperCamelCase__ : Optional[int]=3_2_0 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Tuple=1_0_0 , UpperCamelCase__ : Dict=2_5_6 , UpperCamelCase__ : Optional[int]=2_5_6 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Tuple="mean" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Union[str, Any]=2_5_6 , UpperCamelCase__ : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCamelCase__ : Optional[Any]=(5, 3, 3, 1, 1) , UpperCamelCase__ : Any=(1, 2, 3, 1, 1) , UpperCamelCase__ : Dict=5_1_2 , UpperCamelCase__ : str=8_0 , UpperCamelCase__ : Optional[int]=0 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : str=False , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__)
snake_case__ = hidden_size
snake_case__ = feat_extract_norm
snake_case__ = feat_extract_activation
snake_case__ = list(UpperCamelCase__)
snake_case__ = list(UpperCamelCase__)
snake_case__ = list(UpperCamelCase__)
snake_case__ = conv_bias
snake_case__ = num_buckets
snake_case__ = max_bucket_distance
snake_case__ = num_conv_pos_embeddings
snake_case__ = num_conv_pos_embedding_groups
snake_case__ = len(self.conv_dim)
snake_case__ = num_hidden_layers
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = num_attention_heads
snake_case__ = hidden_dropout
snake_case__ = attention_dropout
snake_case__ = activation_dropout
snake_case__ = feat_proj_dropout
snake_case__ = final_dropout
snake_case__ = layerdrop
snake_case__ = layer_norm_eps
snake_case__ = initializer_range
snake_case__ = num_ctc_classes
snake_case__ = vocab_size
snake_case__ = do_stable_layer_norm
snake_case__ = use_weighted_layer_sum
snake_case__ = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case__ = apply_spec_augment
snake_case__ = mask_time_prob
snake_case__ = mask_time_length
snake_case__ = mask_time_min_masks
snake_case__ = mask_feature_prob
snake_case__ = mask_feature_length
# parameters for pretraining with codevector quantized representations
snake_case__ = num_codevectors_per_group
snake_case__ = num_codevector_groups
snake_case__ = contrastive_logits_temperature
snake_case__ = num_negatives
snake_case__ = codevector_dim
snake_case__ = proj_codevector_dim
snake_case__ = diversity_loss_weight
# ctc loss
snake_case__ = ctc_loss_reduction
snake_case__ = ctc_zero_infinity
# adapter
snake_case__ = add_adapter
snake_case__ = adapter_kernel_size
snake_case__ = adapter_stride
snake_case__ = num_adapter_layers
snake_case__ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case__ = list(UpperCamelCase__)
snake_case__ = list(UpperCamelCase__)
snake_case__ = list(UpperCamelCase__)
snake_case__ = xvector_output_dim
@property
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
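# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original file;
# assumes a current transformers install): a small config like the one above
# is what parameterizes the WavLM model classes.
from transformers import WavLMConfig, WavLMModel

small_config = WavLMConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=2, intermediate_size=128)
small_model = WavLMModel(small_config)  # randomly initialized, tiny for illustration
# ---------------------------------------------------------------------------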
| 654
| 0
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
lowerCamelCase = {'target_lang': 'fi', 'source_lang': 'en'}
lowerCamelCase = '>>zh<<'
lowerCamelCase = 'Helsinki-NLP/'
if is_torch_available():
lowerCamelCase = 'pt'
elif is_tf_available():
lowerCamelCase = 'tf'
else:
lowerCamelCase = 'jax'
@require_sentencepiece
class snake_case_ ( lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase =MarianTokenizer
__UpperCAmelCase =False
__UpperCAmelCase =True
def A__ ( self ):
super().setUp()
__lowerCAmelCase = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
__lowerCAmelCase = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
__lowerCAmelCase = Path(self.tmpdirname )
save_json(__lowerCamelCase , save_dir / VOCAB_FILES_NAMES['vocab'] )
save_json(__lowerCamelCase , save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(__lowerCamelCase , save_dir / VOCAB_FILES_NAMES['source_spm'] )
copyfile(__lowerCamelCase , save_dir / VOCAB_FILES_NAMES['target_spm'] )
__lowerCAmelCase = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self , **_A ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def A__ ( self , _A ):
return (
"This is a test",
"This is a test",
)
def A__ ( self ):
__lowerCAmelCase = '''</s>'''
__lowerCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def A__ ( self ):
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '</s>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(__lowerCamelCase ) , 9 )
def A__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def A__ ( self ):
__lowerCAmelCase = MarianTokenizer.from_pretrained(F"""{ORG_NAME}opus-mt-en-de""" )
__lowerCAmelCase = en_de_tokenizer(['I am a small frog'] , return_tensors=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
__lowerCAmelCase = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0]
self.assertListEqual(__lowerCamelCase , batch.input_ids[0] )
__lowerCAmelCase = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(__lowerCamelCase )
__lowerCAmelCase = [x.name for x in Path(__lowerCamelCase ).glob('*' )]
self.assertIn('source.spm' , __lowerCamelCase )
MarianTokenizer.from_pretrained(__lowerCamelCase )
def A__ ( self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = tok(
['I am a small frog' * 1_0_0_0, 'I am a small frog'] , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2) )
def A__ ( self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = tok(['I am a tiny frog', 'I am a small frog'] , padding=__lowerCamelCase , return_tensors=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0) )
@slow
def A__ ( self ):
__lowerCAmelCase = {'''input_ids''': [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , )
    def test_tokenizer_integration_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs')
        source_text = 'Tämä on testi'
        target_text = 'This is a test'
        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]
        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)
        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)
        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
def temp_input_value(min_val: int = 10, max_val: int = 1_000, option: bool = True) -> int:
    """Return a temporary input value for tests: min_val if option else max_val."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError('Invalid value for min_val or max_val (min_val < max_val)')
    return min_val if option else max_val
def get_avg(number_1: int, number_2: int) -> int:
    """Return the integer midpoint of two numbers."""
    return int((number_1 + number_2) / 2)
def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Bisect the interval [lower, higher] until to_guess is found."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError('argument value for lower and higher must be (lower < higher)')
    if not lower < to_guess < higher:
        raise ValueError(
            'guess value must be within the range of lower and higher value' )

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print('started...')
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")
def main() -> None:
    """Read lower/higher/guess values from the user and run the search."""
    lower = int(input('Enter lower value : ').strip())
    higher = int(input('Enter high value : ').strip())
    guess = int(input('Enter value to guess : ').strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
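        # OpenAI GPT defines no dedicated padding token; the tester (presumably
        # for convenience) reserves the last vocabulary id as pad_token_id.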
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[4_81, 47_35, 5_44]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
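# Note: the scaling above maps pixel value 0 -> -1.0 and 255 -> 1.0; the
# pipeline undoes it after VQ-VAE decoding with `image / 2 + 0.5` near the
# end of `__call__` below.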
class LDMSuperResolutionPipeline(DiffusionPipeline):
    """Latent-diffusion super-resolution pipeline (VQ-VAE + UNet + scheduler)."""

    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        image=None,
        batch_size=1,
        num_inference_steps=100,
        eta=0.0,
        generator=None,
        output_type="pil",
        return_dict=True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
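# Example of how MAPPING is applied (illustrative key, not from a real
# checkpoint): a fairseq name such as
#   "w2v_model.encoder.layers.3.self_attn.k_proj.weight"
# matches the "self_attn.k_proj" entry, and the "*" placeholder in
# "encoder.layers.*.attention.k_proj" is replaced by the layer index "3".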
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        elif name.split('.')[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
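# Assigning `.data` directly reshapes the layer's weight to emb.weight's
# (vocab_size, emb_size) layout, so the layer computes
# logits = hidden_states @ emb.weight.T, i.e. a weight-tied output projection.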
def create_vocab_dict(dict_path):
    with open(dict_path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        words = [line.split(' ')[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        '<s>': 0,
        '<pad>': 1,
        '</s>': 2,
        '<unk>': 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
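# For example (hypothetical dict file), lines "hello 52\nworld 41" yield
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}.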
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True,
    )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])}
    )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)
    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove('embed_out')
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False
    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, 'vocab.json'), 'w') as fp:
        json.dump(vocab_dict, fp)
    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, 'vocab.json'))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wav2vec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'speech_to_text_2'
    config['feature_extractor_type'] = 'wav2vec2'
    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
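# Example invocation (all paths below are placeholders, not real files):
#   python convert_wav2vec2_seq2seq_original_to_pytorch.py \
#       --checkpoint_path /path/to/checkpoint_best.pt \
#       --dict_path /path/to/dict.tgt.txt \
#       --pytorch_dump_folder_path ./wav2vec2-2-speech2text2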
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=1_02_24, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, 'README.md')
    print(f"Generating {path}")
    with open(path, 'w', encoding='utf-8') as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab_tokens = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab = dict(zip(vocab_tokens, range(len(vocab_tokens))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
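    # The expected ids follow the toy vocabulary defined in setUp():
    # "l"=0, "o"=1, "w"=2, "er"=15, "\u0120"=10, "n"=9, "e"=3 and "<unk>"=19.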
    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 3_1414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()
        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)
        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)
        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)
        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"
        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                text = f" {text}"
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27
        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1
def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure P = nRT / V of an ideal gas system."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('Invalid inputs. Enter positive value.')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Return the volume V = nRT / P of an ideal gas system."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('Invalid inputs. Enter positive value.')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
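# Worked example (illustrative values): 1 mol of an ideal gas at 300 K in a
# 1 m^3 vessel exerts P = nRT / V = 1 * 300 * 8.314462 / 1 ≈ 2494.34 Pa,
# i.e. pressure_of_gas_system(1.0, 300.0, 1.0) ≈ 2494.34.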
if __name__ == "__main__":
from doctest import testmod
testmod()
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase__ = False
class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer
@slow
def A__ ( self):
_UpperCamelCase : Tuple = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_UpperCamelCase : List[Any] = DDPMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
_UpperCamelCase : List[Any] = DDIMScheduler(
num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
_UpperCamelCase : Optional[Any] = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(__snake_case) for _ in range(4)]
_UpperCamelCase : str = [torch.randn((4, 3, 32, 32)).to(__snake_case) for _ in range(4)]
_UpperCamelCase : int = [torch.randint(0 , 10_00 , (4,)).long().to(__snake_case) for _ in range(4)]
# train with a DDPM scheduler
_UpperCamelCase , _UpperCamelCase : List[Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : int = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Any = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : str = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.get_model_optimizer(resolution=32)
model.train().to(__snake_case)
for i in range(4):
optimizer.zero_grad()
_UpperCamelCase : Dict = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
_UpperCamelCase : Dict = model(__snake_case , timesteps[i]).sample
_UpperCamelCase : Tuple = torch.nn.functional.mse_loss(__snake_case , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
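# Why the final allclose checks can hold at all: both schedulers implement the
# same closed-form forward process. A minimal reference sketch of that formula
# (an assumption spelled out for clarity; it mirrors `add_noise` but is not
# part of the original test):
#
#   x_t = sqrt(alphas_cumprod[t]) * x_0 + sqrt(1 - alphas_cumprod[t]) * noise
def _manual_add_noise(scheduler, clean_sample, noise, timesteps):
    # alphas_cumprod is a 1-D tensor indexed by timestep; broadcast over NCHW.
    alpha_bar = scheduler.alphas_cumprod[timesteps].reshape(-1, 1, 1, 1)
    return alpha_bar.sqrt() * clean_sample + (1 - alpha_bar).sqrt() * noise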
| 648
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }


def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors and all-zero segmentation maps
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
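# Minimal usage sketch for the processor exercised above (illustrative only;
# assumes the vision extras and the ADE20k test fixtures are available):
if __name__ == "__main__":
    processor = BeitImageProcessor(do_reduce_labels=True)
    image, seg_map = prepare_semantic_single_inputs()
    encoding = processor(image, seg_map, return_tensors="pt")
    # with do_reduce_labels, background class 0 is remapped to the ignore
    # index 255 and the remaining labels are shifted down by one
    print(encoding["pixel_values"].shape, encoding["labels"].shape)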
| 698
|
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    # if the second item is a digit, this is an indexed adapter layer
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)

    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"

    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
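    # Example invocation (the paths below are hypothetical placeholders, shown
    # only for illustration of the expected arguments):
    #
    #   python <this script>.py \
    #       --checkpoint_path /path/to/wav2vec2_mbart_checkpoint.pt \
    #       --dict_path /path/to/dict.mbart50.txt \
    #       --config_yaml_path /path/to/config.yaml \
    #       --pytorch_dump_folder_path ./wav2vec2-xls-r-mbart50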
| 698
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
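# Note on the pattern above (general to transformers, not specific to this
# file): `_LazyModule` replaces the package module in `sys.modules`, so a line
# like `from transformers.models.clipseg import CLIPSegProcessor` only imports
# the heavy torch-backed submodules on first attribute access.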
| 98
|
'''simple docstring'''
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number
if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
print(
f"""{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."""
)
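    # Worked example (illustrative): 145 is a Krishnamurthy number because
    # 1! + 4! + 5! = 1 + 24 + 120 = 145, while 144 is not.
    assert krishnamurthy(145) and not krishnamurthy(144)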
| 98
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
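# Minimal usage sketch (illustrative values, not from the original module):
# the quantization-aware fields sit on top of the usual RoBERTa-style settings.
if __name__ == "__main__":
    config = IBertConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, quant_mode=True)
    print(config.model_type, config.quant_mode, config.force_dequant)  # ibert True none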
| 613
|
'''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__snake_case : int = logging.get_logger(__name__)
class lowercase_ ( _A ):
def __init__( self , **UpperCamelCase__ ) -> Tuple:
"""simple docstring"""
requires_backends(self , ["bs4"] )
super().__init__(**UpperCamelCase__ )
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
UpperCAmelCase_ = parent.find_all(child.name , recursive=UpperCamelCase__ )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(UpperCamelCase__ ) else next(i for i, s in enumerate(UpperCamelCase__ , 1 ) if s is child ) )
UpperCAmelCase_ = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = BeautifulSoup(UpperCamelCase__ , "html.parser" )
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for element in html_code.descendants:
if type(UpperCamelCase__ ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
UpperCAmelCase_ = html.unescape(UpperCamelCase__ ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(UpperCamelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = self.xpath_soup(UpperCamelCase__ )
stringaxtag_seq.append(UpperCamelCase__ )
stringaxsubs_seq.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError("Number of doc strings and xtags does not correspond" )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError("Number of doc strings and xsubs does not correspond" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = ""
for tagname, subs in zip(UpperCamelCase__ , UpperCamelCase__ ):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
def __call__( self , UpperCamelCase__ ) -> BatchFeature:
"""simple docstring"""
UpperCAmelCase_ = False
# Check that strings has a valid type
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = True
elif isinstance(UpperCamelCase__ , (list, tuple) ):
if len(UpperCamelCase__ ) == 0 or isinstance(html_strings[0] , UpperCamelCase__ ):
UpperCAmelCase_ = True
if not valid_strings:
raise ValueError(
"HTML strings must of type `str`, `List[str]` (batch of examples), "
F"""but is of type {type(UpperCamelCase__ )}.""" )
UpperCAmelCase_ = bool(isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(html_strings[0] , UpperCamelCase__ )) )
if not is_batched:
UpperCAmelCase_ = [html_strings]
# Get nodes + xpaths
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for html_string in html_strings:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.get_three_from_single(UpperCamelCase__ )
nodes.append(UpperCamelCase__ )
UpperCAmelCase_ = []
for node, tag_list, sub_list in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase_ = self.construct_xpath(UpperCamelCase__ , UpperCamelCase__ )
xpath_strings.append(UpperCamelCase__ )
xpaths.append(UpperCamelCase__ )
# return as Dict
UpperCAmelCase_ = {"nodes": nodes, "xpaths": xpaths}
UpperCAmelCase_ = BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
return encoded_inputs
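# Minimal usage sketch (illustrative; requires the bs4 backend):
if __name__ == "__main__":
    feature_extractor = MarkupLMFeatureExtractor()
    encoding = feature_extractor("<html><body><h1>Title</h1><p>Hello world</p></body></html>")
    print(encoding["nodes"])   # [['Title', 'Hello world']]
    print(encoding["xpaths"])  # [['/html/body/h1', '/html/body/p']]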
| 660
| 0
|
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")
def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate the serving server from provided command line arguments.
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information"""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model"""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model"""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model"""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers-cli.
        """
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """
        Tokenize the provided input and eventually returns corresponding tokens ids:
        - **text_input**: String to tokenize
        - **return_ids**: Boolean flag indicating if the tokens have to be converted to their integer mapping.
        """
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """
        Detokenize the provided tokens ids to readable text:
        - **tokens_ids**: List of tokens ids
        - **skip_special_tokens**: Flag indicating to not try to decode special tokens
        - **cleanup_tokenization_spaces**: Flag indicating to remove all leading/trailing spaces and intermediate ones.
        """
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        """
        **inputs**: Inputs to run the pipeline on.
        """
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
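# Usage sketch (illustrative; the endpoint shapes follow the routes defined
# above, and `embed=True` means each payload key wraps the parameter name):
#
#   transformers-cli serve --task sentiment-analysis --port 8888
#
# then, from another process:
#
#   import requests
#   requests.post("http://localhost:8888/forward", json={"inputs": "I love this!"}).json()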
| 711
|
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    # any extra trained model can do
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    # testing with 2 models to validate: 1. translation (t5) 2. summarization (mbart)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 296
| 0
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
|
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(f'The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}')
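    # Worked example of the sliding window (illustrative): for
    # [1, 4, 2, 10, 2, 3, 1, 0, 20] and k=4 the best window is [3, 1, 0, 20].
    assert max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4) == 24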
| 527
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_roc_bert""": ["""ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoCBertConfig"""],
"""tokenization_roc_bert""": ["""RoCBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
"""ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoCBertForCausalLM""",
"""RoCBertForMaskedLM""",
"""RoCBertForMultipleChoice""",
"""RoCBertForPreTraining""",
"""RoCBertForQuestionAnswering""",
"""RoCBertForSequenceClassification""",
"""RoCBertForTokenClassification""",
"""RoCBertLayer""",
"""RoCBertModel""",
"""RoCBertPreTrainedModel""",
"""load_tf_weights_in_roc_bert""",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 17
| 0
|
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
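# A small illustration (hypothetical helper, not part of the original test
# suite) of what the random-input utilities above produce: `ids_tensor` draws
# integer token ids, and `random_attention_mask` is expected to keep at least
# the last position attended so no row is fully masked.
def _show_dummy_inputs():
    input_ids = ids_tensor([2, 7], vocab_size=99)
    attention_mask = random_attention_mask([2, 7])
    assert input_ids.shape == (2, 7)
    assert bool(attention_mask[:, -1].all())
    return input_ids, attention_mask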
| 97
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
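# A usage sketch (assumes network access to the EleutherAI checkpoint): the
# conversation helper above simply concatenates every turn followed by the EOS
# token, then keeps only the most recent `model_max_length` ids.
def _demo_conversation_ids(turns=("Hello!", "Hi, how can I help?")):
    tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
    input_ids = []
    for text in turns:
        input_ids.extend(tokenizer.encode(text, add_special_tokens=False) + [tokenizer.eos_token_id])
    return input_ids[-tokenizer.model_max_length :]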
| 420
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
"tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["PerceiverFeatureExtractor"]
__UpperCAmelCase = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
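# A minimal sketch of the optional-dependency guard used above: probing the
# availability flag and swallowing `OptionalDependencyNotAvailable` is what
# lets this package expose only the objects whose backend is installed.
# (Hypothetical helper, not part of the original file.)
def _available_perceiver_exports():
    exports = list(_import_structure["configuration_perceiver"])
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass  # torch missing: the modeling classes are simply not exported
    else:
        exports.extend(_import_structure["modeling_perceiver"])
    return exports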
| 709
|
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input, drop_prob: float = 0.0, training: bool = False):
    # Drop paths (stochastic depth) per sample, applied in the main path of residual blocks.
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


class PoolFormerDropPath(nn.Module):
    """Drop paths (stochastic depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states):
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
super().__init__()
snake_case: List[str] = patch_size if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (patch_size, patch_size)
snake_case: List[str] = stride if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (stride, stride)
snake_case: Union[str, Any] = padding if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (padding, padding)
snake_case: Any = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = norm_layer(SCREAMING_SNAKE_CASE__ ) if norm_layer else nn.Identity()
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: int = self.projection(SCREAMING_SNAKE_CASE__ )
snake_case: Any = self.norm(SCREAMING_SNAKE_CASE__ )
return embeddings
class SCREAMING_SNAKE_CASE ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__(1 , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
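# A shape sketch (hypothetical helper): PoolFormer replaces attention with this
# parameter-free mixer, and the `- hidden_states` term makes the module return
# only the residual added by average pooling.
def _pooling_shape_check():
    mixer = PoolFormerPooling(pool_size=3)
    features = torch.randn(2, 64, 14, 14)  # (batch, channels, height, width)
    mixed = mixer(features)
    assert mixed.shape == features.shape  # stride 1 + padding pool_size // 2 keeps the resolution
    return mixed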
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: List[Any] = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
snake_case: Any = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
snake_case: str = PoolFormerDropPath(SCREAMING_SNAKE_CASE__ )
if isinstance(config.hidden_act , SCREAMING_SNAKE_CASE__ ):
snake_case: Tuple = ACTaFN[config.hidden_act]
else:
snake_case: int = config.hidden_act
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Dict = self.conva(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = self.act_fn(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = self.drop(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = self.conva(SCREAMING_SNAKE_CASE__ )
snake_case: str = self.drop(SCREAMING_SNAKE_CASE__ )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Tuple = PoolFormerPooling(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = PoolFormerOutput(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case: Dict = PoolFormerGroupNorm(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = PoolFormerGroupNorm(SCREAMING_SNAKE_CASE__ )
# Useful for training neural nets
snake_case: Union[str, Any] = PoolFormerDropPath(SCREAMING_SNAKE_CASE__ ) if drop_path > 0.0 else nn.Identity()
snake_case: Optional[Any] = config.use_layer_scale
if config.use_layer_scale:
snake_case: Any = nn.Parameter(
config.layer_scale_init_value * torch.ones((SCREAMING_SNAKE_CASE__) ) , requires_grad=SCREAMING_SNAKE_CASE__ )
snake_case: int = nn.Parameter(
config.layer_scale_init_value * torch.ones((SCREAMING_SNAKE_CASE__) ) , requires_grad=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if self.use_layer_scale:
snake_case: str = self.pooling(self.before_norm(SCREAMING_SNAKE_CASE__ ) )
snake_case: Dict = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
snake_case: str = hidden_states + self.drop_path(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = ()
snake_case: Dict = self.output(self.after_norm(SCREAMING_SNAKE_CASE__ ) )
snake_case: Union[str, Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
snake_case: Any = hidden_states + self.drop_path(SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = (output,) + outputs
return outputs
else:
snake_case: Optional[Any] = self.drop_path(self.pooling(self.before_norm(SCREAMING_SNAKE_CASE__ ) ) )
# First residual connection
snake_case: Union[str, Any] = pooling_output + hidden_states
snake_case: List[Any] = ()
# Second residual connection inside the PoolFormerOutput block
snake_case: List[str] = self.drop_path(self.output(self.after_norm(SCREAMING_SNAKE_CASE__ ) ) )
snake_case: Dict = hidden_states + layer_output
snake_case: Optional[Any] = (output,) + outputs
return outputs
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: List[Any] = config
# stochastic depth decay rule
snake_case: List[Any] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
snake_case: Union[str, Any] = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
snake_case: List[Any] = nn.ModuleList(SCREAMING_SNAKE_CASE__ )
# Transformer blocks
snake_case: str = []
snake_case: int = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
snake_case: List[str] = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
SCREAMING_SNAKE_CASE__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(SCREAMING_SNAKE_CASE__ ) )
snake_case: Tuple = nn.ModuleList(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True ):
'''simple docstring'''
snake_case: str = () if output_hidden_states else None
snake_case: Dict = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
snake_case , snake_case: Dict = layers
# Get patch embeddings from hidden_states
snake_case: int = embedding_layer(SCREAMING_SNAKE_CASE__ )
# Send the embeddings through the blocks
for _, blk in enumerate(SCREAMING_SNAKE_CASE__ ):
snake_case: List[Any] = blk(SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = layer_outputs[0]
if output_hidden_states:
snake_case: List[str] = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=SCREAMING_SNAKE_CASE__ )
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = PoolFormerConfig
__UpperCamelCase = "poolformer"
__UpperCamelCase = "pixel_values"
__UpperCamelCase = True
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE__ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(SCREAMING_SNAKE_CASE__ , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case: List[Any] = value
__UpperCAmelCase = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__UpperCAmelCase = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , snake_case , )
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = config
snake_case: Tuple = PoolFormerEncoder(SCREAMING_SNAKE_CASE__ )
# Initialize weights and apply final processing
self.post_init()
def _UpperCamelCase ( self ):
'''simple docstring'''
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
'''simple docstring'''
snake_case: Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case: List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
snake_case: Optional[Any] = self.encoder(
SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )
snake_case: List[Any] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=encoder_outputs.hidden_states , )
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__()
snake_case: Any = nn.Linear(config.hidden_size , config.hidden_size )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: int = self.dense(SCREAMING_SNAKE_CASE__ )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , snake_case , )
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = config.num_labels
snake_case: str = PoolFormerModel(SCREAMING_SNAKE_CASE__ )
# Final norm
snake_case: int = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
snake_case: Dict = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
'''simple docstring'''
snake_case: Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
snake_case: Optional[Any] = self.poolformer(
SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )
snake_case: Any = outputs[0]
snake_case: str = self.classifier(self.norm(SCREAMING_SNAKE_CASE__ ).mean([-2, -1] ) )
snake_case: Any = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
snake_case: Tuple = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
snake_case: Dict = 'single_label_classification'
else:
snake_case: List[str] = 'multi_label_classification'
if self.config.problem_type == "regression":
snake_case: Union[str, Any] = MSELoss()
if self.num_labels == 1:
snake_case: List[str] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
snake_case: int = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif self.config.problem_type == "single_label_classification":
snake_case: Union[str, Any] = CrossEntropyLoss()
snake_case: Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
snake_case: int = BCEWithLogitsLoss()
snake_case: Optional[int] = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not return_dict:
snake_case: str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=SCREAMING_SNAKE_CASE__ , logits=SCREAMING_SNAKE_CASE__ , hidden_states=outputs.hidden_states )
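# A compact sketch (hypothetical helper) of the `problem_type` dispatch used by
# the classification head above: when the config does not pin a problem type,
# it is inferred from `num_labels` and the dtype of the labels.
def _infer_problem_type(num_labels: int, labels: torch.Tensor) -> str:
    if num_labels == 1:
        return "regression"
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification"
    return "multi_label_classification"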
| 692
| 0
|
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    # Count the hollow square laminae that can be formed with up to `limit` tiles.
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
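# A brute-force cross-check (hypothetical helper, not in the original solution):
# enumerate (outer_width, hole_width) pairs directly. Useful for validating
# `solution` on small limits, e.g. _brute_force(1_000) == solution(1_000).
def _brute_force(limit: int) -> int:
    count = 0
    outer = 3
    while 4 * outer - 4 <= limit:  # 4w - 4 tiles is the thinnest lamina of width w
        hole = outer - 2
        while hole >= 1 and outer * outer - hole * hole <= limit:
            count += 1
            hole -= 2
        outer += 1
    return count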
if __name__ == "__main__":
print(F'''{solution() = }''')
| 25
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'},
    )
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    use_auth_token: bool = field(
        default=False, metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        }, )
@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'} )
    validation_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'}, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={'help': 'The number of processes to use for the preprocessing.'}, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. If passed, sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    pad_to_max_length: bool = field(
        default=False, metadata={
            'help': (
                'Whether to pad all samples to the maximum sentence length. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
                'efficient on GPU but very bad for TPU.'
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        }, )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
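# A shape walk-through (hypothetical toy numbers, not part of the original
# script) for the collator above: 2 questions with 4 endings each are flattened
# into 8 sequences for `tokenizer.pad`, then viewed back to (batch, choices, seq).
def _demo_unflatten():
    padded_input_ids = torch.zeros(8, 16, dtype=torch.long)  # stand-in for tokenizer.pad output
    unflattened = padded_input_ids.view(2, 4, -1)  # one row of 4 choices per question
    assert unflattened.shape == (2, 4, 16)
    labels = torch.tensor([1, 3], dtype=torch.int64)  # index of the correct ending per question
    return unflattened, labels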
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , A , A )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if data_args.train_file is not None:
SCREAMING_SNAKE_CASE : Dict = data_args.train_file
if data_args.validation_file is not None:
SCREAMING_SNAKE_CASE : Dict = data_args.validation_file
SCREAMING_SNAKE_CASE : List[str] = data_args.train_file.split('''.''' )[-1]
SCREAMING_SNAKE_CASE : Optional[int] = load_dataset(
A , data_files=A , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE : Tuple = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE : int = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
SCREAMING_SNAKE_CASE : Optional[Any] = [F"""ending{i}""" for i in range(4 )]
SCREAMING_SNAKE_CASE : Any = '''sent1'''
SCREAMING_SNAKE_CASE : Tuple = '''sent2'''
if data_args.max_seq_length is None:
SCREAMING_SNAKE_CASE : List[str] = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
SCREAMING_SNAKE_CASE : List[str] = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
SCREAMING_SNAKE_CASE : Tuple = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(A : Any ):
SCREAMING_SNAKE_CASE : int = [[context] * 4 for context in examples[context_name]]
SCREAMING_SNAKE_CASE : str = examples[question_header_name]
SCREAMING_SNAKE_CASE : List[Any] = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(A )
]
# Flatten out
SCREAMING_SNAKE_CASE : List[str] = list(chain(*A ) )
SCREAMING_SNAKE_CASE : int = list(chain(*A ) )
# Tokenize
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(
A , A , truncation=A , max_length=A , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(A ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
SCREAMING_SNAKE_CASE : str = raw_datasets['''train''']
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = min(len(A ) , data_args.max_train_samples )
SCREAMING_SNAKE_CASE : Optional[Any] = train_dataset.select(range(A ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
SCREAMING_SNAKE_CASE : str = train_dataset.map(
A , batched=A , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
SCREAMING_SNAKE_CASE : str = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE : List[Any] = min(len(A ) , data_args.max_eval_samples )
SCREAMING_SNAKE_CASE : int = eval_dataset.select(range(A ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
SCREAMING_SNAKE_CASE : Tuple = eval_dataset.map(
A , batched=A , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )
    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Union[str, Any] = Trainer(
model=A , args=A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=A , data_collator=A , compute_metrics=A , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE : Optional[Any] = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE : List[Any] = last_checkpoint
SCREAMING_SNAKE_CASE : List[Any] = trainer.train(resume_from_checkpoint=A )
trainer.save_model() # Saves the tokenizer too for easy upload
SCREAMING_SNAKE_CASE : List[str] = train_result.metrics
SCREAMING_SNAKE_CASE : str = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(A )
)
SCREAMING_SNAKE_CASE : Dict = min(A , len(A ) )
trainer.log_metrics('''train''' , A )
trainer.save_metrics('''train''' , A )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE : Optional[int] = trainer.evaluate()
SCREAMING_SNAKE_CASE : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(A )
SCREAMING_SNAKE_CASE : List[str] = min(A , len(A ) )
trainer.log_metrics('''eval''' , A )
trainer.save_metrics('''eval''' , A )
SCREAMING_SNAKE_CASE : Dict = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**A )
else:
trainer.create_model_card(**A )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 527
| 0
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
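# A back-of-the-envelope check (hypothetical helper) of the numbers the
# integration test above encodes: with `output_stride=32`, a 224x224 input
# yields a 7x7 final feature map, and the 1001-way head is ImageNet-1k plus a
# background class.
def _expected_shapes(image_size: int = 224, output_stride: int = 32, last_hidden_size: int = 1024):
    feature_map = image_size // output_stride  # 224 // 32 == 7
    return (1, last_hidden_size, feature_map, feature_map), (1, 1001)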
| 662
|
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, then yield each prompt `n_copies` times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks whether all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task, distributing the work across processes via accelerate."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))

    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 662
| 1
|
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
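    # A tiny usage sketch (hypothetical values): the iterator yields the sum of
    # every node reached by the recursive depth-first search.
    root = Node(10)
    root.left, root.right = Node(5), Node(-3)
    print(next(iter(BinaryTreeNodeSum(root))))  # 10 + 5 - 3 == 12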
| 43
|
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train new tokenizer via Tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train new tokenizer via Tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 402
| 0
|
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor( ProcessorMixin ):
    attributes = ['image_processor']
    image_processor_class = 'SamImageProcessor'
    def __init__( self , image_processor ):
        """simple docstring"""
        super().__init__(image_processor )
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size['longest_edge']
    def __call__( self , images=None , input_points=None , input_labels=None , input_boxes=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , **kwargs , )
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor['original_sizes']
        if hasattr(original_sizes , 'numpy' ):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points , input_labels , input_boxes = self._check_and_preprocess_points(
            input_points=input_points , input_labels=input_labels , input_boxes=input_boxes , )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor , original_sizes , input_points=input_points , input_labels=input_labels , input_boxes=input_boxes , return_tensors=return_tensors , )
        return encoding_image_processor
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ,a_=None ,a_=None ,a_=None ,a_="pt" ,):
"""simple docstring"""
if input_points is not None:
if len(a_ ) != len(a_ ):
lowerCAmelCase__ = [
self._normalize_coordinates(self.target_size ,a_ ,original_sizes[0] ) for point in input_points
]
else:
lowerCAmelCase__ = [
self._normalize_coordinates(self.target_size ,a_ ,a_ )
for point, original_size in zip(a_ ,a_ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
lowerCAmelCase__ , lowerCAmelCase__ = self._pad_points_and_labels(a_ ,a_ )
lowerCAmelCase__ = np.array(a_ )
if input_labels is not None:
lowerCAmelCase__ = np.array(a_ )
if input_boxes is not None:
if len(a_ ) != len(a_ ):
lowerCAmelCase__ = [
self._normalize_coordinates(self.target_size ,a_ ,original_sizes[0] ,is_bounding_box=a_ )
for box in input_boxes
]
else:
lowerCAmelCase__ = [
self._normalize_coordinates(self.target_size ,a_ ,a_ ,is_bounding_box=a_ )
for box, original_size in zip(a_ ,a_ )
]
lowerCAmelCase__ = np.array(a_ )
if input_boxes is not None:
if return_tensors == "pt":
lowerCAmelCase__ = torch.from_numpy(a_ )
# boxes batch size of 1 by default
lowerCAmelCase__ = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
lowerCAmelCase__ = tf.convert_to_tensor(a_ )
# boxes batch size of 1 by default
lowerCAmelCase__ = tf.expand_dims(a_ ,1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
lowerCAmelCase__ = torch.from_numpy(a_ )
# point batch size of 1 by default
lowerCAmelCase__ = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
lowerCAmelCase__ = tf.convert_to_tensor(a_ )
# point batch size of 1 by default
lowerCAmelCase__ = tf.expand_dims(a_ ,1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
lowerCAmelCase__ = torch.from_numpy(a_ )
# point batch size of 1 by default
lowerCAmelCase__ = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
lowerCAmelCase__ = tf.convert_to_tensor(a_ )
# point batch size of 1 by default
lowerCAmelCase__ = tf.expand_dims(a_ ,1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
    def _pad_points_and_labels( self , input_points , input_labels ):
        """simple docstring"""
        expected_nb_points = max([point.shape[0] for point in input_points] )
        processed_input_points = []
        for i, point in enumerate(input_points ):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
                input_labels[i] = np.append(input_labels[i] , [self.point_pad_value] )
            processed_input_points.append(point )
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates( self , target_size , coords , original_size , is_bounding_box=False ):
        """simple docstring"""
        old_h , old_w = original_size
        new_h , new_w = self.image_processor._get_preprocess_shape(original_size , longest_edge=target_size )
        coords = deepcopy(coords ).astype(float )
        if is_bounding_box:
            coords = coords.reshape(-1 , 2 , 2 )
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1 , 4 )
        return coords
    def _check_and_preprocess_points( self , input_points=None , input_labels=None , input_boxes=None , ):
        """simple docstring"""
        if input_points is not None:
            if hasattr(input_points , 'numpy' ):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points , list ) or not isinstance(input_points[0] , list ):
                raise ValueError('Input points must be a list of list of floating points.' )
            input_points = [np.array(input_point ) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels , 'numpy' ):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels , list ) or not isinstance(input_labels[0] , list ):
                raise ValueError('Input labels must be a list of list of integers.' )
            input_labels = [np.array(label ) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes , 'numpy' ):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes , list )
                or not isinstance(input_boxes[0] , list )
                or not isinstance(input_boxes[0][0] , list )
            ):
                raise ValueError('Input boxes must be a list of list of list of floating points.' )
            input_boxes = [np.array(box ).astype(np.float32 ) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes
    @property
    def model_input_names( self ):
        """simple docstring"""
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names ) )
    def post_process_masks( self , *args , **kwargs ):
        """simple docstring"""
        return self.image_processor.post_process_masks(*args , **kwargs )
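A hypothetical end-to-end sketch of this processor; the checkpoint name, image size, and point coordinates are illustrative assumptions:

if __name__ == "__main__":
    from PIL import Image  # assumption: Pillow is available
    from transformers import SamProcessor
    processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
    image = Image.new("RGB", (640, 480))
    inputs = processor(image, input_points=[[[320, 240]]], return_tensors="pt")
    # points are rescaled from the original image size to the model's longest edge
    print(inputs["input_points"].shape)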
| 604
|
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue :
    def __init__( self ):
        """simple docstring"""
        self.elements = []
        self.set = set()
    def minkey( self ):
        """simple docstring"""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float('inf' )
    def empty( self ):
        """simple docstring"""
        return len(self.elements ) == 0
    def put( self , item , priority ):
        """simple docstring"""
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item) )
            self.set.add(item )
        else:
            # update the priority of an item that is already queued
            temp = []
            (pri, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                (pri, x) = heapq.heappop(self.elements )
            temp.append((priority, item) )
            for pro, xxx in temp:
                heapq.heappush(self.elements , (pro, xxx) )
    def remove_element( self , item ):
        """simple docstring"""
        if item in self.set:
            self.set.remove(item )
        temp = []
        (pro, x) = heapq.heappop(self.elements )
        while x != item:
            temp.append((pro, x) )
            (pro, x) = heapq.heappop(self.elements )
        for prito, yyy in temp:
            heapq.heappush(self.elements , (prito, yyy) )
    def top_show( self ):
        """simple docstring"""
        return self.elements[0][1]
    def top_and_pop( self ):
        """simple docstring"""
        (priority, item) = heapq.heappop(self.elements )
        self.set.remove(item )
        return (priority, item)
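A small sanity-check sketch for the PriorityQueue above; items and priorities are illustrative, and the method names follow the reconstruction used here:

def _priority_queue_demo():
    pq = PriorityQueue()
    pq.put((0, 0), 5)
    pq.put((1, 1), 2)
    pq.put((0, 0), 1)  # re-inserting an item updates its priority
    assert pq.minkey() == 1
    assert pq.top_show() == (0, 0)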
def UpperCAmelCase_ ( snake_case__ , snake_case__ ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ = np.array(snake_case__ )
lowerCAmelCase__ = np.array(snake_case__ )
return np.linalg.norm(a - b )
def UpperCAmelCase_ ( snake_case__ , snake_case__ ) -> Any:
"""simple docstring"""
return consistent_heuristic(snake_case__ , snake_case__ ) // t
def UpperCAmelCase_ ( snake_case__ , snake_case__ ) -> str:
"""simple docstring"""
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def UpperCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ = g_function[start] + Wa * heuristics[i](snake_case__ , snake_case__ )
return ans
def UpperCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ) -> Any:
"""simple docstring"""
lowerCAmelCase__ = np.chararray((n, n) )
for i in range(snake_case__ ):
for j in range(snake_case__ ):
lowerCAmelCase__ = '*'
for i in range(snake_case__ ):
for j in range(snake_case__ ):
if (j, (n - 1) - i) in blocks:
lowerCAmelCase__ = '#'
lowerCAmelCase__ = '-'
lowerCAmelCase__ = back_pointer[goal]
while x != start:
((lowerCAmelCase__) , (lowerCAmelCase__)) = x
# print(x)
lowerCAmelCase__ = '-'
lowerCAmelCase__ = back_pointer[x]
lowerCAmelCase__ = '-'
for i in range(snake_case__ ):
for j in range(snake_case__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
lowerCAmelCase__ = back_pointer[goal]
while x != start:
print(snake_case__ , end=' ' )
lowerCAmelCase__ = back_pointer[x]
print(snake_case__ )
sys.exit()
def UpperCAmelCase_ ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def UpperCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> Union[str, Any]:
"""simple docstring"""
for itera in range(snake_case__ ):
open_list[itera].remove_element(snake_case__ )
# print("s", s)
# print("j", j)
((lowerCAmelCase__) , (lowerCAmelCase__)) = s
lowerCAmelCase__ = (x - 1, y)
lowerCAmelCase__ = (x + 1, y)
lowerCAmelCase__ = (x, y + 1)
lowerCAmelCase__ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(snake_case__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(snake_case__ )
lowerCAmelCase__ = -1
lowerCAmelCase__ = float('inf' )
if valid(snake_case__ ) and g_function[neighbours] > g_function[s] + 1:
lowerCAmelCase__ = g_function[s] + 1
lowerCAmelCase__ = s
if neighbours not in close_list_anchor:
open_list[0].put(snake_case__ , key(snake_case__ , 0 , snake_case__ , snake_case__ ) )
if neighbours not in close_list_inad:
for var in range(1 , snake_case__ ):
if key(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) <= Wa * key(
snake_case__ , 0 , snake_case__ , snake_case__ ):
open_list[j].put(
snake_case__ , key(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) )
def UpperCAmelCase_ ( ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(1_0, 1),
(1_1, 1),
(1_2, 1),
(1_3, 1),
(1_4, 1),
(1_5, 1),
(1_6, 1),
(1_7, 1),
(1_8, 1),
(1_9, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters (upstream keeps two weights W1 and W2, both equal to 1;
# the code here refers to a single Wa)
Wa = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def UpperCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ) -> str:
"""simple docstring"""
lowerCAmelCase__ = {start: 0, goal: float('inf' )}
lowerCAmelCase__ = {start: -1, goal: -1}
lowerCAmelCase__ = []
lowerCAmelCase__ = set()
for i in range(snake_case__ ):
open_list.append(PriorityQueue() )
open_list[i].put(snake_case__ , key(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , snake_case__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(snake_case__ , snake_case__ , snake_case__ )
else:
lowerCAmelCase__ , lowerCAmelCase__ = open_list[i].top_show()
visited.add(snake_case__ )
expand_state(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
close_list_inad.append(snake_case__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(snake_case__ , snake_case__ , snake_case__ )
else:
lowerCAmelCase__ = open_list[0].top_show()
visited.add(snake_case__ )
expand_state(
snake_case__ , 0 , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
close_list_anchor.append(snake_case__ )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(snake_case__ ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 604
| 1
|
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort( sequence ):
    """simple docstring"""
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low] , sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid] , sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"""The elements inside the sequence must contain only {colors} values"""
            raise ValueError(msg )
    return sequence
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_input = input("""Enter numbers separated by commas:\n""").strip()
    unsorted = [int(item.strip()) for item in user_input.split(""",""")]
    print(F"{dutch_national_flag_sort(unsorted)}")
| 387
|
import os
import pytest
from attr import dataclass
_a : int = 'us-east-1' # defaults region
@dataclass
class SageMakerTestEnvironment:
    """simple docstring"""
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
    @property
    def metric_definitions( self ):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"loss.*=\D*(.*?)]?$"},
            ]
    @property
    def base_job_name( self ):
        return f'''{self.framework}-transfromers-test'''
    @property
    def test_path( self ):
        return f'''./tests/sagemaker/scripts/{self.framework}'''
    @property
    def image_uri( self ):
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def sm_env( request ):
    '''simple docstring'''
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
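A hypothetical sketch of how the fixture is typically attached to a test class; the class name and assertion are illustrative:

@pytest.mark.usefixtures("sm_env")
class TestDummy:
    framework = "pytorch"
    def test_env(self):
        # the fixture stores the environment on the test class as `self.env`
        assert self.env.base_job_name == "pytorch-transfromers-test"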
| 479
| 0
|
'''simple docstring'''
from __future__ import annotations
def kmp( pattern : str , text : str ) -> bool:
    '''simple docstring'''
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i , j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array( pattern : str ) -> list[int]:
    '''simple docstring'''
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
# Test 1)
lowerCAmelCase_ = 'abc1abc12'
lowerCAmelCase_ = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
lowerCAmelCase_ = 'alskfjaldsk23adsfabcabc'
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
lowerCAmelCase_ = 'ABABX'
lowerCAmelCase_ = 'ABABZABABYABABX'
assert kmp(pattern, text)
# Test 3)
lowerCAmelCase_ = 'AAAB'
lowerCAmelCase_ = 'ABAAAAAB'
assert kmp(pattern, text)
# Test 4)
lowerCAmelCase_ = 'abcdabcy'
lowerCAmelCase_ = 'abcxabcdabxabcdabcdabcy'
assert kmp(pattern, text)
# Test 5)
lowerCAmelCase_ = 'aabaabaaa'
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
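A worked check of the failure array from Test 5, added here for clarity; each entry is the length of the longest proper prefix that is also a suffix:

# index:    0 1 2 3 4 5 6 7 8
# pattern:  a a b a a b a a a
# failure:  0 1 0 1 2 3 4 5 2
# e.g. at index 8, the longest proper prefix that is also a suffix is "aa" (length 2)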
| 720
|
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    '''simple docstring'''
    model = torch.nn.Linear(2 , 4)
    optimizer = torch.optim.AdamW(model.parameters() , lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer , max_lr=0.01 , steps_per_epoch=2 , epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl
def get_signature( model ):
    '''simple docstring'''
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def load_random_weights( model ):
    '''simple docstring'''
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester ( AccelerateTestCase ):
"""simple docstring"""
@require_cuda
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase : Tuple = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(lowerCamelCase ):
UpperCamelCase : int = Accelerator(cpu=lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
UpperCamelCase : Dict = Accelerator()
UpperCamelCase : Any = GradientState()
assert state.num_steps == 1
UpperCamelCase : List[Any] = 4
assert state.num_steps == 4
assert state.sync_gradients is True
UpperCamelCase : Optional[int] = False
assert state.sync_gradients is False
GradientState._reset_state()
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : Optional[int] = Accelerator()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = create_components()
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) : Any = accelerator.prepare(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
UpperCamelCase : Tuple = Accelerator()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = create_components()
accelerator.prepare(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*lowerCamelCase , **lowerCamelCase ):
pass
with patch("torch.cuda.set_device" , lowerCamelCase ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
UpperCamelCase : Union[str, Any] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , "cuda:64" )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : int = Accelerator()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = create_components()
accelerator.prepare(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCamelCase : str = get_signature(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowerCamelCase )
# make sure random weights don't match
load_random_weights(lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCamelCase ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCamelCase ) ) < 1e-3 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase : Optional[int] = Accelerator()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : int = create_components()
accelerator.prepare(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCamelCase : List[Any] = get_signature(lowerCamelCase )
# saving hook
def save_config(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCamelCase : str = {"class_name": models[0].__class__.__name__}
with open(os.path.join(lowerCamelCase , "data.json" ) , "w" ) as f:
json.dump(lowerCamelCase , lowerCamelCase )
# loading hook
def load_config(lowerCamelCase , lowerCamelCase ):
with open(os.path.join(lowerCamelCase , "data.json" ) , "r" ) as f:
UpperCamelCase : Optional[int] = json.load(lowerCamelCase )
UpperCamelCase : int = config["class_name"]
UpperCamelCase : Dict = accelerator.register_save_state_pre_hook(lowerCamelCase )
UpperCamelCase : Union[str, Any] = accelerator.register_load_state_pre_hook(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowerCamelCase )
# make sure random weights don't match with hooks
load_random_weights(lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCamelCase ) ) > 1e-3 )
# random class name to verify correct one is loaded
UpperCamelCase : Union[str, Any] = "random"
# make sure loaded weights match with hooks
accelerator.load_state(lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCamelCase ) ) < 1e-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowerCamelCase )
# make sure random weights don't match with hooks removed
load_random_weights(lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCamelCase ) ) > 1e-3 )
# random class name to verify correct one is loaded
UpperCamelCase : Any = "random"
# make sure loaded weights match with hooks removed
accelerator.load_state(lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(lowerCamelCase ) ) < 1e-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
UpperCamelCase : Optional[int] = Accelerator()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = create_components()
UpperCamelCase : int = None
# This should work
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
self.assertTrue(dummy_obj is None )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase : List[str] = Accelerator()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = create_components()
UpperCamelCase : Union[str, Any] = [1, 2, 3]
# This should work
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
self.assertEqual(
getattr(lowerCamelCase , "_is_accelerate_prepared" , lowerCamelCase ) , lowerCamelCase , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
self.assertEqual(
getattr(lowerCamelCase , "_is_accelerate_prepared" , lowerCamelCase ) , lowerCamelCase , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowerCamelCase , "_is_accelerate_prepared" , lowerCamelCase ) , lowerCamelCase , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowerCamelCase , "_is_accelerate_prepared" , lowerCamelCase ) , lowerCamelCase , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowerCamelCase , "_is_accelerate_prepared" , lowerCamelCase ) , lowerCamelCase , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowerCamelCase , "_is_accelerate_prepared" , lowerCamelCase ) , lowerCamelCase , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
from transformers import AutoModelForCausalLM
UpperCamelCase : Dict = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=lowerCamelCase , device_map={"": 0} , )
UpperCamelCase : str = Accelerator()
# This should work
UpperCamelCase : Any = accelerator.prepare(lowerCamelCase )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
from transformers import AutoModelForCausalLM
UpperCamelCase : Optional[Any] = Accelerator()
with init_empty_weights():
UpperCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
UpperCamelCase : Optional[int] = infer_auto_device_map(lowerCamelCase )
UpperCamelCase : Dict = "cpu"
UpperCamelCase : int = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , device_map=lowerCamelCase , load_in_abit=lowerCamelCase , llm_inta_enable_fpaa_cpu_offload=lowerCamelCase )
# This should not work and get value error
with self.assertRaises(lowerCamelCase ):
UpperCamelCase : Dict = accelerator.prepare(lowerCamelCase )
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
from transformers import AutoModelForCausalLM
UpperCamelCase : Dict = {"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
UpperCamelCase : Dict = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
UpperCamelCase : List[Any] = infer_auto_device_map(lowerCamelCase )
UpperCamelCase : Any = 1
UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=lowerCamelCase , device_map=lowerCamelCase , )
UpperCamelCase : Optional[int] = Accelerator()
# This should not work and get value error
with self.assertRaises(lowerCamelCase ):
UpperCamelCase : Dict = accelerator.prepare(lowerCamelCase )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
from transformers import AutoModelForCausalLM
with init_empty_weights():
UpperCamelCase : str = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
UpperCamelCase : Union[str, Any] = infer_auto_device_map(lowerCamelCase )
UpperCamelCase : Tuple = 1
UpperCamelCase : List[str] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=lowerCamelCase , device_map=lowerCamelCase , )
UpperCamelCase : Tuple = Accelerator()
# This should work
UpperCamelCase : Optional[Any] = accelerator.prepare(lowerCamelCase )
@require_cuda
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCamelCase : int = torch.nn.Linear(10 , 10 )
UpperCamelCase : Dict = torch.optim.SGD(model.parameters() , lr=0.01 )
UpperCamelCase : Optional[Any] = Accelerator(cpu=lowerCamelCase )
UpperCamelCase : Any = accelerator.prepare(lowerCamelCase )
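For orientation, a minimal sketch of the prepare/backward pattern these tests exercise; the model, data, and hyperparameters are illustrative:

def _training_loop_sketch():
    accelerator = Accelerator(cpu=True)
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    dataloader = DataLoader(TensorDataset(torch.randn(8, 2), torch.randn(8, 4)), batch_size=4)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for x, y in dataloader:
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)  # replaces loss.backward() so mixed precision / DDP work
        optimizer.step()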
| 435
| 0
|
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_UpperCAmelCase = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
_UpperCAmelCase = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
_UpperCAmelCase = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
_UpperCAmelCase = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
_UpperCAmelCase = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key( k , patterns ):
    '''simple docstring'''
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name , hf_name )
    return k
def convert_bigbird_pegasus( tf_weights: dict , config_update: dict ):
    '''simple docstring'''
    cfg = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(cfg )
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )}
    for k, v in tqdm(decoder_weights.items() ,'tf -> hf conversion' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k ,patterns )
        if new_k not in state_dict:
            raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
    for k, v in tqdm(remaining_weights.items() ,'tf -> hf conversion' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k ,patterns )
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
    mapping['model.encoder.embed_positions.weight'] = mapping['model.embed_positions.weight']
    mapping['model.decoder.embed_positions.weight'] = mapping.pop('model.embed_positions.weight' )
    missing , extra = torch_model.load_state_dict(mapping ,strict=False )
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            'final_logits_bias',
            'model.encoder.embed_tokens.weight',
            'model.decoder.embed_tokens.weight',
            'lm_head.weight',
        ]
    ]
    assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], f'''no matches found for the following tf keys {extra}'''
    return torch_model
def get_tf_weights_as_numpy( path ):
    '''simple docstring'''
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['global_step']
    for name, shape in tqdm(init_vars ,desc='converting tf checkpoint to dict' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path ,name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch( ckpt_path: str ,save_dir: str ,config_update: dict ):
    '''simple docstring'''
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights ,config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
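A typical invocation sketch for this script; the script filename and paths are illustrative assumptions:

# python convert_bigbird_pegasus_tf_to_pytorch.py \
#     --tf_ckpt_path /path/to/tf_checkpoint \
#     --save_dir ./bigbird_pegasus_converted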
| 558
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest( unittest.TestCase ):
    """simple docstring"""
    def setUp( self : Any ) -> None:
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor( self : List[Any] , **kwargs ):
        return TvltImageProcessor.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor( self : int , **kwargs ):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self : List[str] ) -> None:
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self : Any ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , TvltFeatureExtractor )
        self.assertIsInstance(processor.image_processor , TvltImageProcessor )
    def test_feature_extractor( self : Tuple ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([12_000] )
        audio_dict = feature_extractor(audio , return_tensors="np" )
        input_processor = processor(audio=audio , return_tensors="np" )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_image_processor( self : Dict ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        images = np.ones([3, 224, 224] )
        image_dict = image_processor(images , return_tensors="np" )
        input_processor = processor(images=images , return_tensors="np" )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_processor( self : int ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([12_000] )
        images = np.ones([3, 224, 224] )
        inputs = processor(audio=audio , images=images )
        self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_model_input_names( self : Tuple ):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
| 61
| 0
|
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] ) -> List[str]:
__snake_case = FileLock(str(tmpdir / "foo.lock" ) )
__snake_case = FileLock(str(tmpdir / "foo.lock" ) )
__snake_case = 0.01
with locka.acquire():
with pytest.raises(_UpperCAmelCase ):
__snake_case = time.time()
locka.acquire(_UpperCAmelCase )
assert time.time() - _start > timeout
def __UpperCAmelCase ( _UpperCAmelCase : Any ) -> List[str]:
__snake_case = "a" * 10_00 + ".lock"
__snake_case = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(_UpperCAmelCase )
assert len(os.path.basename(locka._lock_file ) ) <= 2_55
__snake_case = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(_UpperCAmelCase ):
locka.acquire(0 )
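A minimal FileLock usage sketch outside pytest; the lock path is illustrative:

def _filelock_sketch(lock_path="/tmp/demo.lock"):
    lock = FileLock(lock_path)
    with lock.acquire(timeout=1):
        pass  # critical section; a second process would block here or raise Timeout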
| 680
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : List[Any] = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class Data2VecTextConfig( PretrainedConfig ):
    model_type = """data2vec-text"""
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig( OnnxConfig ):
@property
    def inputs( self : Any ):
"""simple docstring"""
if self.task == "multiple-choice":
__snake_case = {0: "batch", 1: "choice", 2: "sequence"}
else:
__snake_case = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
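A minimal instantiation sketch for the configuration above; the values shown are the defaults:

if __name__ == "__main__":
    config = Data2VecTextConfig(vocab_size=30_522, hidden_size=768)
    print(config.num_hidden_layers)  # 12 by default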
| 680
| 1
|
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest ( TestCase ):
'''simple docstring'''
    def setUp( self : Dict ):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5
        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname , "realm_tokenizer" )
        os.makedirs(realm_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(realm_tokenizer_path , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        realm_block_records_path = os.path.join(self.tmpdirname , "realm_block_records" )
        os.makedirs(realm_block_records_path , exist_ok=True )
    def get_tokenizer( self : Dict ):
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , """realm_tokenizer""" ) )
    def tearDown( self : Optional[Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
    def get_config( self : str ):
        '''simple docstring'''
        config = RealmConfig(num_block_records=self.num_block_records )
        return config
    def get_dummy_dataset( self : List[Any] ):
        '''simple docstring'''
        dataset = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""question""": ["""foo""", """bar"""],
"""answers""": [["""Foo""", """Bar"""], ["""Bar"""]],
} )
return dataset
    def get_dummy_block_records( self : List[Any] ):
        '''simple docstring'''
        block_records = np.array(
[
B"""This is the first record""",
B"""This is the second record""",
B"""This is the third record""",
B"""This is the fourth record""",
B"""This is the fifth record""",
B"""This is a longer longer longer record""",
            ] , dtype=object , )
return block_records
    def get_dummy_retriever( self : str ):
        '''simple docstring'''
        retriever = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
    def test_retrieve( self : int ):
        '''simple docstring'''
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3] , dtype="long" )
        question_input_ids = tokenizer(["Test question"] ).input_ids
        answer_ids = tokenizer(
            ["the fourth"] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers , start_pos , end_pos , concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors="np" )
        self.assertEqual(len(has_answers ) , 2 )
        self.assertEqual(len(start_pos ) , 2 )
        self.assertEqual(len(end_pos ) , 2 )
        self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
    def test_block_has_answer( self : Dict ):
        '''simple docstring'''
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3, 5] , dtype="long" )
        question_input_ids = tokenizer(["Test question"] ).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers , start_pos , end_pos , _ = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors="np" )
        self.assertEqual([False, True, True] , has_answers )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , start_pos )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , end_pos )
    def test_save_load_pretrained( self : Optional[Any] ):
        '''simple docstring'''
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
        self.assertEqual(retriever.block_records[0] , b"This is the first record" )
        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
            self.assertEqual(retriever.block_records[0] , b"This is the first record" )
| 519
|
def solution( n: int = 600_851_475_143 ) -> int:
    """simple docstring"""
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("""Parameter n must be int or castable to int.""" )
    if n <= 0:
        raise ValueError("""Parameter n must be greater than or equal to one.""" )
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number )
if __name__ == "__main__":
print(F'''{solution() = }''')
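Two spot checks with known factorizations, added for clarity; 13195 = 5 * 7 * 13 * 29 and the default input's largest prime factor is 6857:

if __name__ == "__main__":
    assert solution(13_195) == 29
    assert solution(600_851_475_143) == 6857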
| 519
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def create_rename_keys( config , vqa_model=False , nlvr_model=False , irtr_model=False ):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""transformer.blocks.{i}.norm1.weight""", f"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.norm1.bias""", f"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.weight""", f"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.bias""", f"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""transformer.blocks.{i}.norm2.weight""", f"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.norm2.bias""", f"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""transformer.blocks.{i}.mlp.fc1.weight""", f"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc1.bias""", f"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.weight""", f"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.bias""", f"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def read_in_q_k_v( state_dict , config ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : List[str] = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=__SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Dict = False
lowerCamelCase : Optional[int] = False
lowerCamelCase : Union[str, Any] = False
if "vqa" in checkpoint_url:
lowerCamelCase : List[Any] = True
lowerCamelCase : Any = 3129
lowerCamelCase : List[str] = "huggingface/label-files"
lowerCamelCase : Optional[Any] = "vqa2-id2label.json"
lowerCamelCase : Tuple = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
lowerCamelCase : str = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowerCamelCase : Any = idalabel
lowerCamelCase : List[Any] = {v: k for k, v in idalabel.items()}
lowerCamelCase : List[str] = ViltForQuestionAnswering(__SCREAMING_SNAKE_CASE )
elif "nlvr" in checkpoint_url:
lowerCamelCase : List[Any] = True
lowerCamelCase : List[str] = 2
lowerCamelCase : Optional[Any] = {0: "False", 1: "True"}
lowerCamelCase : str = {v: k for k, v in config.idalabel.items()}
lowerCamelCase : List[Any] = 3
lowerCamelCase : Tuple = ViltForImagesAndTextClassification(__SCREAMING_SNAKE_CASE )
elif "irtr" in checkpoint_url:
lowerCamelCase : Optional[Any] = True
lowerCamelCase : Dict = ViltForImageAndTextRetrieval(__SCREAMING_SNAKE_CASE )
elif "mlm_itm" in checkpoint_url:
lowerCamelCase : List[Any] = True
lowerCamelCase : Optional[int] = ViltForMaskedLM(__SCREAMING_SNAKE_CASE )
else:
raise ValueError("Unknown model type" )
# load state_dict of original model, remove and rename some keys
lowerCamelCase : List[str] = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location="cpu" )["state_dict"]
lowerCamelCase : Optional[int] = create_rename_keys(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if mlm_model or irtr_model:
lowerCamelCase : Optional[Any] = ["itm_score.fc.weight", "itm_score.fc.bias"]
for k in ignore_keys:
state_dict.pop(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowerCamelCase : Optional[int] = model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(__SCREAMING_SNAKE_CASE )
# Define processor
lowerCamelCase : Tuple = ViltImageProcessor(size=384 )
lowerCamelCase : Tuple = BertTokenizer.from_pretrained("bert-base-uncased" )
lowerCamelCase : List[Any] = ViltProcessor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowerCamelCase : int = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=__SCREAMING_SNAKE_CASE ).raw )
lowerCamelCase : Dict = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=__SCREAMING_SNAKE_CASE ).raw )
lowerCamelCase : int = (
"The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
" standing."
)
lowerCamelCase : List[str] = processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors="pt" )
lowerCamelCase : Any = processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors="pt" )
lowerCamelCase : Optional[int] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowerCamelCase : Any = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" , stream=__SCREAMING_SNAKE_CASE ).raw )
if mlm_model:
lowerCamelCase : List[str] = "a bunch of [MASK] laying on a [MASK]."
else:
lowerCamelCase : List[Any] = "How many cats are there?"
lowerCamelCase : Dict = processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors="pt" )
lowerCamelCase : str = model(**__SCREAMING_SNAKE_CASE )
# Verify outputs
if mlm_model:
lowerCamelCase : str = torch.Size([1, 11, 30522] )
lowerCamelCase : List[str] = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
# verify masked token prediction equals "cats"
lowerCamelCase : Dict = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowerCamelCase : Any = torch.Size([1, 3129] )
lowerCamelCase : Optional[int] = torch.tensor([-15.9495, -18.1472, -10.3041] )
assert torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
# verify vqa prediction equals "2"
lowerCamelCase : Tuple = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowerCamelCase : Any = torch.Size([1, 2] )
lowerCamelCase : Optional[Any] = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 704
|
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into unit-lower L and upper U."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
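    # Illustrative check (added, not in the original file; uses the function
    # name fixed above): L @ U should reproduce a decomposable input matrix.
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)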
| 231
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'distilbert-base-uncased': 5_12,
'distilbert-base-uncased-distilled-squad': 5_12,
'distilbert-base-cased': 5_12,
'distilbert-base-cased-distilled-squad': 5_12,
'distilbert-base-german-cased': 5_12,
'distilbert-base-multilingual-cased': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = DistilBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # keep the backend normalizer in sync with the requested casing options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
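# Hypothetical usage sketch (not part of this module): the fast tokenizer wraps a
# WordPiece backend, e.g.
#   tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   tok("hello world")  # -> [CLS] hello world [SEP] ids plus an attention mask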
| 35
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None
class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)
    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)
    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel
        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)
        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)
    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)
            return path
        except Exception as e:
            self.fail(e)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")
    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")
    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)
        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)
        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})
    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")
    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
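# Note on quantize (assumption, inferred from generate_identified_filename above):
# the quantized graph is written next to the source model with an added suffix,
# e.g. model.onnx -> model-quantized.onnx, so the quantize tests above are pure
# on-disk size comparisons between the two files.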
| 230
| 0
|
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments
def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
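    # Quick illustration (added, not part of the original module): $1,000 at a
    # 0.5% daily rate over 10 days accrues $50 of simple interest.
    print(simple_interest(1000.0, 0.005, 10))  # 50.0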
| 703
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
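# Minimal usage sketch (illustrative, not part of this module): the defaults
# above reproduce the google/canine-s configuration.
# config = CanineConfig()
# assert config.num_hash_buckets == 16384 and config.downsampling_rate == 4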
| 206
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    """Build the list of (original_key, hf_key) rename pairs for the ViLT checkpoint."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'transformer.blocks.{i}.norm1.weight', F'vilt.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'transformer.blocks.{i}.norm1.bias', F'vilt.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'transformer.blocks.{i}.attn.proj.weight', F'vilt.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'transformer.blocks.{i}.attn.proj.bias', F'vilt.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'transformer.blocks.{i}.norm2.weight', F'vilt.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'transformer.blocks.{i}.norm2.bias', F'vilt.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(F'transformer.blocks.{i}.mlp.fc1.weight', F'vilt.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'transformer.blocks.{i}.mlp.fc1.bias', F'vilt.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'transformer.blocks.{i}.mlp.fc2.weight', F'vilt.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'transformer.blocks.{i}.mlp.fc2.bias', F'vilt.encoder.layer.{i}.output.dense.bias') )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
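# Example of one (src, dest) pair the function above produces for layer 0:
# ("transformer.blocks.0.norm1.weight", "vilt.encoder.layer.0.layernorm_before.weight")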
def read_in_q_k_v(state_dict, config):
    """Split each fused qkv projection into separate query/key/value entries."""
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the original classification head weights from the state dict."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move the value stored under key `old` to key `new`."""
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original ViLT weights into the HuggingFace ViLT structure."""
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)
    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        # the logits are 2D here, so a single slice check suffices (the earlier
        # duplicated assertion with a 3D index was a bug)
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :2], expected_slice, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 639
|
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key: str) -> str:
    """Replace `name.<digit>` segments with `name_<digit>` to match Flax naming."""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
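# Illustration (hypothetical key, not from this module): indexed PyTorch module
# names become underscore-separated Flax names, e.g.
# rename_key("down_blocks.0.attentions.1.proj") -> "down_blocks_0.attentions_1.proj"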
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch parameter key and reshape its tensor to the Flax layout."""
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
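# Layout note (general PyTorch/Flax convention, added as context): PyTorch stores
# linear weights as (out_features, in_features) and conv weights as (out, in, kH, kW);
# Flax expects (in_features, out_features) kernels and (kH, kW, in, out) conv kernels,
# hence the .T and transpose(2, 3, 1, 0) above.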
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
| 92
| 0
|
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients in ascending-power order) at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's rule: one pass, no explicit powers."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
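    # Added cross-check (assumption: double precision is adequate for these
    # coefficients): both evaluation strategies must agree.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-9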
| 108
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"
    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
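# Minimal usage sketch (illustrative, not part of this module): default
# construction falls back to a ResNet backbone config.
# config = UperNetConfig()
# assert config.backbone_config.model_type == "resnet"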
| 108
| 1
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None
    def init_retrieval(self, distributed_port: int):
        logger.info('initializing retrieval')
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info('dist initialized')
            # needs to be set manually
            os.environ['GLOO_SOCKET_IFNAME'] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ['MASTER_PORT'] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend='gloo')
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info('dist not initialized / main')
            self.index.init_index()
        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)
    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0
    def _scattered(self, scatter_list, target_shape, target_type=torch.floataa if False else torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor
    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith('e')), None)
        return ifname
    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
        # distributed training
        world_size = dist.get_world_size(group=self.process_group)
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
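# Flow summary (added commentary on the class above): every worker gathers its
# query vectors to the main worker over the gloo group, the main worker alone
# queries the index, and the resulting doc ids/embeddings are scattered back so
# each worker only receives the rows belonging to its own queries.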
| 64
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # only build a fresh scheduler when none was provided (the original
        # unconditional re-creation defeated the `scheduler` argument)
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type="deis", solver_order=order, solver_type=solver_type, )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        sample = self.full_loop(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.091) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        assert sample.dtype == torch.float16
| 461
| 0
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()
    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")
    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)
    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)
    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)
    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)
    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)
    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)
    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")
    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")
    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))
    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")
    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))
    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))
    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))
    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())
    def test_mul_matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))
    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))
    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)
    def test_add_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))
    def test_sub_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))
    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5)), )
if __name__ == "__main__":
unittest.main()
| 703
|
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase_ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=224 , _SCREAMING_SNAKE_CASE=1000 , _SCREAMING_SNAKE_CASE=[3, 3, 6, 4] , _SCREAMING_SNAKE_CASE=[48, 56, 112, 220] , ):
a_ = parent
a_ = batch_size
a_ = num_channels
a_ = is_training
a_ = use_labels
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = num_labels
a_ = image_size
a_ = layer_depths
a_ = embed_dims
def __magic_name__ ( self ):
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.num_labels )
a_ = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_SCREAMING_SNAKE_CASE , layer_scale_init_value=1E-5 , )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a_ = SwiftFormerModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
a_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a_ = self.num_labels
a_ = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
a_ = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
a_ = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self ):
((a_) , (a_) , (a_)) = self.prepare_config_and_inputs()
a_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowerCamelCase : List[Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
_lowerCamelCase : Optional[Any] = (
{"""feature-extraction""": SwiftFormerModel, """image-classification""": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Dict = False
_lowerCamelCase : Any = False
_lowerCamelCase : Tuple = False
_lowerCamelCase : List[Any] = False
def __magic_name__ ( self ):
self.model_tester = SwiftFormerModelTester(self )
self.config_tester = ConfigTester(
self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def __magic_name__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def __magic_name__ ( self ):
pass
def __magic_name__ ( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def __magic_name__ ( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , expected_arg_names )
def __magic_name__ ( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def __magic_name__ ( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def __magic_name__ ( self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = SwiftFormerModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def __magic_name__ ( self ):
pass
def __magic_name__ ( self ):
def check_hidden_states_output(inputs_dict , config , model_class ):
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
hidden_states = outputs.hidden_states
expected_num_stages = 8
self.assertEqual(len(hidden_states ) , expected_num_stages ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(hidden_states ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["""output_hidden_states"""] = True
check_hidden_states_output(inputs_dict , config , model_class )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict , config , model_class )
def __magic_name__ ( self ):
def _config_zero_init(config ):
configs_no_init = copy.deepcopy(config )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(configs_no_init , key , 1E-10 )
if isinstance(getattr(configs_no_init , key , None ) , PretrainedConfig ):
no_init_subconfig = _config_zero_init(getattr(configs_no_init , key ) )
setattr(configs_no_init , key , no_init_subconfig )
return configs_no_init
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config )
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __magic_name__ ( self ):
pass
def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
"""simple docstring"""
a_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self ):
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def __magic_name__ ( self ):
a_ = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(_SCREAMING_SNAKE_CASE )
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
a_ = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
a_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
a_ = torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
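# Added note (editorial): integration tests like the one above pin only a tiny
# logits slice and compare with a loose tolerance (atol=1e-4) so that small
# numerical drift across hardware or backends does not make the test flaky.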
| 403
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase__ ( snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE = BioGptTokenizer
SCREAMING_SNAKE_CASE = False
def _UpperCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab_tokens = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
vocab = dict(zip(vocab_tokens ,range(len(vocab_tokens ) ) ) )
merges = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
self.merges_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ) as fp:
fp.write(json.dumps(vocab ) )
with open(self.merges_file ,"""w""" ) as fp:
fp.write("""\n""".join(merges ) )
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = """lower newer"""
UpperCAmelCase = """lower newer"""
return input_text, output_text
def _UpperCamelCase ( self ):
tokenizer = BioGptTokenizer(self.vocab_file ,self.merges_file )
text = """lower"""
bpe_tokens = ["""low""", """er</w>"""]
tokens = tokenizer.tokenize(text )
self.assertListEqual(tokens ,bpe_tokens )
input_tokens = tokens + ["""<unk>"""]
input_bpe_tokens = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) ,input_bpe_tokens )
@slow
def _UpperCamelCase ( self ):
tokenizer = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
text = tokenizer.encode("""sequence builders""" ,add_special_tokens=False )
text_a = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=False )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text ,text_a )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
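# Added note (editorial): BioGPT uses its separator token (id 2 in the
# pretrained vocab) as a BOS-style marker, so a single sequence is encoded as
# `[2] + A` and a pair as `[2] + A + [2] + B`, which is exactly what the two
# assertions above verify.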
| 341
|
"""simple docstring"""
def exchange_sort( numbers ):
"""simple docstring"""
n = len(numbers )
for i in range(n ):
for j in range(i + 1 , n ):
if numbers[j] < numbers[i]:
numbers[i] , numbers[j] = numbers[j] , numbers[i]
return numbers
if __name__ == "__main__":
user_input = input("""Enter numbers separated by a comma:\n""").strip()
unsorted = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
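# Added sketch (editorial, not in the original script): exchange sort swaps any
# out-of-order pair (i, j) with j > i, so it is O(n^2) like selection sort. A
# quick property check against the built-in sorted():
#
# assert exchange_sort([5, 1, 4, 2, 8]) == sorted([5, 1, 4, 2, 8])
# assert exchange_sort([]) == []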
| 341
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''philschmid/bart-large-cnn-samsum'''
UpperCamelCase__ = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
UpperCamelCase__ = '''summarizer'''
UpperCamelCase__ = AutoTokenizer
UpperCamelCase__ = AutoModelForSeqaSeqLM
UpperCamelCase__ = ['''text''']
UpperCamelCase__ = ['''text''']
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Any ):
return self.pre_processor(lowercase_ , return_tensors="""pt""" , truncation=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Tuple ):
return self.model.generate(**lowercase_ )[0]
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ):
return self.pre_processor.decode(lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
| 30
|
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def main() -> None:
message = input("""Enter message: """ )
key = input("""Enter key [alphanumeric]: """ )
mode = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
mode = """encrypt"""
translated = encrypt_message(key , message )
elif mode.lower().startswith("""d""" ):
mode = """decrypt"""
translated = decrypt_message(key , message )
print(F'''\n{mode.title()}ed message:''' )
print(translated )
def encrypt_message( key : str , message : str ) -> str:
return translate_message(key , message , """encrypt""" )
def decrypt_message( key : str , message : str ) -> str:
return translate_message(key , message , """decrypt""" )
def translate_message( key : str , message : str , mode : str ) -> str:
translated = []
key_index = 0
key = key.upper()
for symbol in message:
num = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(LETTERS )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(key ):
key_index = 0
else:
translated.append(symbol )
return "".join(translated )
if __name__ == "__main__":
main()
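# Added sketch (editorial): encrypt_message and decrypt_message are inverses for
# any alphabetic key. The classic textbook example:
#
# ciphertext = encrypt_message("LEMON", "ATTACKATDAWN") # -> "LXFOPVEFRNHR"
# assert decrypt_message("LEMON", ciphertext) == "ATTACKATDAWN"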
| 30
| 1
|
"""simple docstring"""
class RadixNode :
'''simple docstring'''
def __init__( self , prefix = "" , is_leaf = False ) -> None:
# Mapping from the first character of the prefix of the node
self.nodes = {}
# A node will be a leaf if the tree contains its word
self.is_leaf = is_leaf
self.prefix = prefix
def match( self , word ) -> tuple[str, str, str]:
x = 0
for q, w in zip(self.prefix , word ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def insert_many( self , words ) -> None:
for word in words:
self.insert(word )
def insert( self , word ) -> None:
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
self.is_leaf = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
self.nodes[word[0]] = RadixNode(prefix=word , is_leaf=True )
else:
incoming_node = self.nodes[word[0]]
matching_string , remaining_prefix , remaining_word = incoming_node.match(
word )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(remaining_word )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
incoming_node.prefix = remaining_prefix
aux_node = self.nodes[matching_string[0]]
self.nodes[matching_string[0]] = RadixNode(matching_string , False )
self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
if remaining_word == "":
self.nodes[matching_string[0]].is_leaf = True
else:
self.nodes[matching_string[0]].insert(remaining_word )
def find( self , word ) -> bool:
incoming_node = self.nodes.get(word[0] , None )
if not incoming_node:
return False
else:
matching_string , remaining_prefix , remaining_word = incoming_node.match(
word )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(remaining_word )
def delete( self , word ) -> bool:
incoming_node = self.nodes.get(word[0] , None )
if not incoming_node:
return False
else:
matching_string , remaining_prefix , remaining_word = incoming_node.match(
word )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(remaining_word )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
merging_node = list(self.nodes.values() )[0]
self.is_leaf = merging_node.is_leaf
self.prefix += merging_node.prefix
self.nodes = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
incoming_node.is_leaf = False
# If there is 1 edge, we merge it with its child
else:
merging_node = list(incoming_node.nodes.values() )[0]
incoming_node.is_leaf = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
incoming_node.nodes = merging_node.nodes
return True
def print_tree( self , height = 0 ) -> None:
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def test_trie():
'''simple docstring'''
words = 'banana bananas bandana band apple all beast'.split()
root = RadixNode()
root.insert_many(words )
assert all(root.find(word ) for word in words )
assert not root.find('bandanas' )
assert not root.find('apps' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def pytests():
'''simple docstring'''
assert test_trie()
def main():
'''simple docstring'''
root = RadixNode()
words = 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(words )
print('Words:' , words )
print('Tree:' )
root.print_tree()
if __name__ == "__main__":
main()
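# Added sketch (editorial): the four insertion cases above all reduce to
# splitting a stored prefix at the first mismatching character. Minimal check:
#
# root = RadixNode()
# root.insert_many(["test", "tester"])
# assert root.find("tester") and not root.find("tes")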
| 573
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = '''lxmert'''
_UpperCAmelCase = {}
def __init__( self , snake_case=30522 , snake_case=768 , snake_case=12 , snake_case=9500 , snake_case=1600 , snake_case=400 , snake_case=3072 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=2 , snake_case=0.02 , snake_case=1E-12 , snake_case=9 , snake_case=5 , snake_case=5 , snake_case=2048 , snake_case=4 , snake_case=6.67 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=True , **snake_case , ) -> Optional[int]:
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = num_qa_labels
_UpperCAmelCase = num_object_labels
_UpperCAmelCase = num_attr_labels
_UpperCAmelCase = l_layers
_UpperCAmelCase = x_layers
_UpperCAmelCase = r_layers
_UpperCAmelCase = visual_feat_dim
_UpperCAmelCase = visual_pos_dim
_UpperCAmelCase = visual_loss_normalizer
_UpperCAmelCase = task_matched
_UpperCAmelCase = task_mask_lm
_UpperCAmelCase = task_obj_predict
_UpperCAmelCase = task_qa
_UpperCAmelCase = visual_obj_loss
_UpperCAmelCase = visual_attr_loss
_UpperCAmelCase = visual_feat_loss
_UpperCAmelCase = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
super().__init__(**snake_case )
| 573
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
__snake_case: Any = argparse.ArgumentParser(
description=(
"Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
parser.add_argument("--model_name", default="roberta-large", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
__snake_case: Union[str, Any] = parser.parse_args()
if args.model_type == "roberta":
__snake_case: Optional[int] = RobertaForMaskedLM.from_pretrained(args.model_name)
__snake_case: str = "roberta"
elif args.model_type == "gpt2":
__snake_case: str = GPTaLMHeadModel.from_pretrained(args.model_name)
__snake_case: List[str] = "transformer"
__snake_case: Tuple = model.state_dict()
__snake_case: Optional[Any] = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
__snake_case: Union[str, Any] = state_dict[F"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
__snake_case: Dict = F"""{prefix}.embeddings.{w}.weight"""
__snake_case: Tuple = state_dict[param_name]
for w in ["weight", "bias"]:
__snake_case: Optional[int] = F"""{prefix}.embeddings.LayerNorm.{w}"""
__snake_case: int = state_dict[param_name]
# Transformer Blocks #
__snake_case: Tuple = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
__snake_case: List[Any] = state_dict[
F"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
__snake_case: Any = state_dict[F"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
__snake_case: Optional[int] = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
__snake_case: List[str] = state_dict[F"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
__snake_case: Any = state_dict[F"""lm_head.dense.{w}"""]
__snake_case: str = state_dict[F"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
__snake_case: Optional[int] = state_dict[F"""{prefix}.ln_f.{w}"""]
__snake_case: Optional[int] = state_dict["lm_head.weight"]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 460
|
'''simple docstring'''
from __future__ import annotations
def prime_factors( n : int ) -> list[int]:
"""simple docstring"""
i = 2
factors = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i )
if n > 1:
factors.append(n )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 460
| 1
|
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = StableUnCLIPPipeline
_SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS
_SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_SCREAMING_SNAKE_CASE = False
def _snake_case ( self ) -> Tuple:
embedder_hidden_size = 32
embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=lowercase__ , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
lowerCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowercase__ , num_layers=1 , )
torch.manual_seed(0 )
lowerCAmelCase = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1_000 , clip_sample=lowercase__ , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
lowerCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=lowercase__ )
lowerCAmelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase__ , layers_per_block=1 , upcast_attention=lowercase__ , use_linear_projection=lowercase__ , )
torch.manual_seed(0 )
lowerCAmelCase = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=lowercase__ , steps_offset=1 , )
torch.manual_seed(0 )
lowerCAmelCase = AutoencoderKL()
lowerCAmelCase = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def _snake_case ( self , lowercase , lowercase=0 ) -> Optional[int]:
if str(lowercase__ ).startswith("""mps""" ):
lowerCAmelCase = torch.manual_seed(lowercase__ )
else:
lowerCAmelCase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
lowerCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _snake_case ( self ) -> Union[str, Any]:
test_max_difference = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )
def _snake_case ( self ) -> Optional[Any]:
test_max_difference = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def _snake_case ( self ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> int:
expected_image = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
pipe = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.float16 )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
output = pipe("""anime turtle""" , generator=generator , output_type="""np""" )
image = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(image , expected_image )
def _snake_case ( self ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.float16 )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=lowercase__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCAmelCase = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
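# Added note (editorial): `enable_sequential_cpu_offload` keeps only the
# submodule currently executing resident on the GPU, which is what lets the
# test above bound peak allocation below 7 GB at the cost of extra
# host<->device copies per denoising step.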
| 532
|
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 435
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
__SCREAMING_SNAKE_CASE ="""Create a default config file for Accelerate with only a few flags set."""
def a (_lowerCAmelCase="no" , _lowerCAmelCase = default_json_config_file , _lowerCAmelCase = False ):
SCREAMING_SNAKE_CASE_ = Path(_lowerCAmelCase )
path.parent.mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
if path.exists():
print(
F"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`." )
return False
SCREAMING_SNAKE_CASE_ = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}" )
SCREAMING_SNAKE_CASE_ = {
'''compute_environment''': '''LOCAL_MACHINE''',
'''mixed_precision''': mixed_precision,
}
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE_ = torch.cuda.device_count()
SCREAMING_SNAKE_CASE_ = num_gpus
SCREAMING_SNAKE_CASE_ = False
if num_gpus > 1:
SCREAMING_SNAKE_CASE_ = '''MULTI_GPU'''
else:
SCREAMING_SNAKE_CASE_ = '''NO'''
elif is_xpu_available() and use_xpu:
SCREAMING_SNAKE_CASE_ = torch.xpu.device_count()
SCREAMING_SNAKE_CASE_ = num_xpus
SCREAMING_SNAKE_CASE_ = False
if num_xpus > 1:
SCREAMING_SNAKE_CASE_ = '''MULTI_XPU'''
else:
SCREAMING_SNAKE_CASE_ = '''NO'''
elif is_npu_available():
SCREAMING_SNAKE_CASE_ = torch.npu.device_count()
SCREAMING_SNAKE_CASE_ = num_npus
SCREAMING_SNAKE_CASE_ = False
if num_npus > 1:
SCREAMING_SNAKE_CASE_ = '''MULTI_NPU'''
else:
SCREAMING_SNAKE_CASE_ = '''NO'''
else:
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = '''NO'''
SCREAMING_SNAKE_CASE_ = ClusterConfig(**_lowerCAmelCase )
config.to_json_file(_lowerCAmelCase )
return path
def default_command_parser (parser , parents ):
parser = parser.add_parser('''default''' , parents=parents , help=description , formatter_class=SubcommandHelpFormatter )
parser.add_argument(
'''--config_file''' , default=_lowerCAmelCase , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , dest='''save_location''' , )
parser.add_argument(
'''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=_lowerCAmelCase , help='''Whether or not to use mixed precision training. '''
'''Choose between FP16 and BF16 (bfloat16) training. '''
'''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , )
parser.set_defaults(func=default_config_command )
return parser
def default_config_command (args ):
config_file = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"accelerate configuration saved at {config_file}" )
| 711
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class __magic_name__ ( __UpperCAmelCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = "realm"
def __init__( self: Tuple , _lowerCamelCase: Union[str, Any]=3_05_22 , _lowerCamelCase: Tuple=7_68 , _lowerCamelCase: str=1_28 , _lowerCamelCase: str=12 , _lowerCamelCase: int=12 , _lowerCamelCase: Union[str, Any]=8 , _lowerCamelCase: Optional[Any]=30_72 , _lowerCamelCase: str="gelu_new" , _lowerCamelCase: str=0.1 , _lowerCamelCase: Union[str, Any]=0.1 , _lowerCamelCase: Optional[int]=5_12 , _lowerCamelCase: Union[str, Any]=2 , _lowerCamelCase: int=0.02 , _lowerCamelCase: Tuple=1E-12 , _lowerCamelCase: List[Any]=2_56 , _lowerCamelCase: Any=10 , _lowerCamelCase: Optional[Any]=1E-3 , _lowerCamelCase: Any=5 , _lowerCamelCase: List[str]=3_20 , _lowerCamelCase: List[str]=13_35_37_18 , _lowerCamelCase: str=50_00 , _lowerCamelCase: str=1 , _lowerCamelCase: str=0 , _lowerCamelCase: Dict=2 , **_lowerCamelCase: Tuple , ):
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
# Common config
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = retriever_proj_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = num_candidates
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = layer_norm_eps
# Reader config
SCREAMING_SNAKE_CASE_ = span_hidden_size
SCREAMING_SNAKE_CASE_ = max_span_width
SCREAMING_SNAKE_CASE_ = reader_layer_norm_eps
SCREAMING_SNAKE_CASE_ = reader_beam_size
SCREAMING_SNAKE_CASE_ = reader_seq_len
# Retrieval config
SCREAMING_SNAKE_CASE_ = num_block_records
SCREAMING_SNAKE_CASE_ = searcher_beam_size
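# Added note (editorial): the three commented blocks above mirror REALM's
# architecture -- a shared encoder config, reader-span settings, and retrieval
# settings such as num_block_records and searcher_beam_size.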
| 89
| 0
|
"""simple docstring"""
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any (num , base ) -> str:
'''simple docstring'''
if isinstance(num , float ):
raise TypeError("""int() can't convert non-string with explicit base""" )
if num < 0:
raise ValueError("""parameter must be positive int""" )
if isinstance(base , str ):
raise TypeError("""'str' object cannot be interpreted as an integer""" )
if isinstance(base , float ):
raise TypeError("""'float' object cannot be interpreted as an integer""" )
if base in (0, 1):
raise ValueError("""base must be >= 2""" )
if base > 36:
raise ValueError("""base must be <= 36""" )
new_value = """"""
mod = 0
div = 0
while div != 1:
div , mod = divmod(num , base )
if base >= 11 and 9 < mod < 36:
actual_value = ALPHABET_VALUES[str(mod )]
else:
actual_value = str(mod )
new_value += actual_value
div = num // base
num = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(div )
return str(new_value[::-1] )
return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
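# Added sketch (editorial): a few conversions the doctest loop above exercises
# implicitly:
#
# decimal_to_any(255, 16) # -> "FF"
# decimal_to_any(255, 2) # -> "11111111"
# assert int(decimal_to_any(255, 16), 16) == 255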
| 567
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve () -> Generator[int, None, None]:
'''simple docstring'''
factor_map = {}
prime = 2
while True:
factor = factor_map.pop(prime , None )
if factor:
x = factor + prime
while x in factor_map:
x += factor
factor_map[x] = factor
else:
factor_map[prime * prime] = prime
yield prime
prime += 1
def solution (limit = 1E10 ) -> int:
'''simple docstring'''
primes = sieve()
n = 1
while True:
prime = next(primes )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the remainder will be 2.
next(primes )
n += 2
if __name__ == "__main__":
print(solution())
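# Added sketch (editorial): the incremental sieve yields primes lazily, e.g.
#
# from itertools import islice
# assert list(islice(sieve(), 5)) == [2, 3, 5, 7, 11]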
| 567
| 1
|
"""simple docstring"""
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase__ = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'
def _lowerCamelCase ( ):
SCREAMING_SNAKE_CASE_ = _ask_options(
'''In which compute environment are you running?''', ['''This machine''', '''AWS (Amazon SageMaker)'''], _convert_compute_environment, )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
SCREAMING_SNAKE_CASE_ = get_sagemaker_input()
else:
SCREAMING_SNAKE_CASE_ = get_cluster_input()
return config
def _lowerCamelCase ( __a=None ):
if subparsers is not None:
SCREAMING_SNAKE_CASE_ = subparsers.add_parser('''config''', description=UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser('''Accelerate config command''', description=UpperCamelCase__ )
parser.add_argument(
'''--config_file''', default=UpperCamelCase__, help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
), )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase__ )
return parser
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = get_user_input()
if args.config_file is not None:
SCREAMING_SNAKE_CASE_ = args.config_file
else:
if not os.path.isdir(UpperCamelCase__ ):
os.makedirs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(UpperCamelCase__ )
else:
config.to_yaml_file(UpperCamelCase__ )
print(F'accelerate configuration saved at {config_file}' )
def _lowerCamelCase ( ):
SCREAMING_SNAKE_CASE_ = config_command_parser()
SCREAMING_SNAKE_CASE_ = parser.parse_args()
config_command(UpperCamelCase__ )
if __name__ == "__main__":
main()
| 720
|
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image ( image ):
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''', FutureWarning, )
if isinstance(image, torch.Tensor ):
return image
elif isinstance(image, PIL.Image.Image ):
image = [image]
if isinstance(image[0], PIL.Image.Image ):
w ,h = image[0].size
w ,h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
image = np.concatenate(image, axis=0 )
image = np.array(image ).astype(np.float32 ) / 255.0
image = image.transpose(0, 3, 1, 2 )
image = 2.0 * image - 1.0
image = torch.from_numpy(image )
elif isinstance(image[0], torch.Tensor ):
image = torch.cat(image, dim=0 )
return image
def _preprocess_mask ( mask ):
if isinstance(mask, torch.Tensor ):
return mask
elif isinstance(mask, PIL.Image.Image ):
mask = [mask]
if isinstance(mask[0], PIL.Image.Image ):
w ,h = mask[0].size
w ,h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
mask = [np.array(m.convert('''L''' ).resize((w, h), resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
mask = np.concatenate(mask, axis=0 )
mask = mask.astype(np.float32 ) / 255.0
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
mask = torch.from_numpy(mask )
elif isinstance(mask[0], torch.Tensor ):
mask = torch.cat(mask, dim=0 )
return mask
class snake_case ( __lowercase ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 2_50 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = image
SCREAMING_SNAKE_CASE_ = _preprocess_image(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = original_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE_ = _preprocess_mask(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = mask_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE_ = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE_ )}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
SCREAMING_SNAKE_CASE_ = original_image.shape
SCREAMING_SNAKE_CASE_ = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.device )
SCREAMING_SNAKE_CASE_ = eta
SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps[0] + 1
SCREAMING_SNAKE_CASE_ = generator[0] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
SCREAMING_SNAKE_CASE_ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
# compute previous image: x_t -> x_t-1
SCREAMING_SNAKE_CASE_ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
SCREAMING_SNAKE_CASE_ = self.scheduler.undo_step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = t
SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
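# Added sketch (editorial, hypothetical usage; the class above corresponds to
# diffusers' RePaintPipeline, and the checkpoint name is taken from the RePaint
# docs):
#
# pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
# out = pipe(image=init_image, mask_image=mask, num_inference_steps=250)
# out.images[0].save("inpainted.png")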
| 628
| 0
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class A ( unittest.TestCase ):
def lowercase_ (self : Optional[int] ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
UpperCAmelCase__ = FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=_A , cache_dir=_A )
UpperCAmelCase__ = [t[-1] for t in os.walk(os.path.join(_A , os.listdir(_A )[0] , "snapshots" ) )]
UpperCAmelCase__ = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class A ( unittest.TestCase ):
def lowercase_ (self : Any ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=_A )
UpperCAmelCase__ = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
UpperCAmelCase__ = jax.random.PRNGKey(0 )
UpperCAmelCase__ = 4
UpperCAmelCase__ = jax.device_count()
UpperCAmelCase__ = num_samples * [prompt]
UpperCAmelCase__ = pipeline.prepare_inputs(_A )
# shard inputs and rng
UpperCAmelCase__ = replicate(_A )
UpperCAmelCase__ = jax.random.split(_A , _A )
UpperCAmelCase__ = shard(_A )
images = pipeline(_A , _A , _A , _A , jit=_A ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1E-3
assert np.abs(np.abs(_A , dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5E-1
UpperCAmelCase__ = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_A ) == num_samples
def lowercase_ (self : str ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=_A )
UpperCAmelCase__ = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
UpperCAmelCase__ = jax.random.PRNGKey(0 )
UpperCAmelCase__ = 5_0
UpperCAmelCase__ = jax.device_count()
UpperCAmelCase__ = num_samples * [prompt]
UpperCAmelCase__ = pipeline.prepare_inputs(_A )
# shard inputs and rng
UpperCAmelCase__ = replicate(_A )
UpperCAmelCase__ = jax.random.split(_A , _A )
UpperCAmelCase__ = shard(_A )
images = pipeline(_A , _A , _A , _A , jit=_A ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1E-3
assert np.abs((np.abs(_A , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5E-1
def lowercase_ (self : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=_A )
UpperCAmelCase__ = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
UpperCAmelCase__ = jax.random.PRNGKey(0 )
UpperCAmelCase__ = 5_0
UpperCAmelCase__ = jax.device_count()
UpperCAmelCase__ = num_samples * [prompt]
UpperCAmelCase__ = pipeline.prepare_inputs(_A )
# shard inputs and rng
UpperCAmelCase__ = replicate(_A )
UpperCAmelCase__ = jax.random.split(_A , _A )
UpperCAmelCase__ = shard(_A )
images = pipeline(_A , _A , _A , _A , jit=_A ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(_A , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5E-1
def lowercase_ (self : Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa )
UpperCAmelCase__ = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
UpperCAmelCase__ = jax.random.PRNGKey(0 )
UpperCAmelCase__ = 5_0
UpperCAmelCase__ = jax.device_count()
UpperCAmelCase__ = num_samples * [prompt]
UpperCAmelCase__ = pipeline.prepare_inputs(_A )
# shard inputs and rng
UpperCAmelCase__ = replicate(_A )
UpperCAmelCase__ = jax.random.split(_A , _A )
UpperCAmelCase__ = shard(_A )
images = pipeline(_A , _A , _A , _A , jit=_A ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(_A , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5E-1
def lowercase_ (self : str ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = FlaxDDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , set_alpha_to_one=_A , steps_offset=1 , )
UpperCAmelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=_A , safety_checker=_A , )
UpperCAmelCase__ = scheduler.create_state()
UpperCAmelCase__ = scheduler_state
UpperCAmelCase__ = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
UpperCAmelCase__ = jax.random.PRNGKey(0 )
UpperCAmelCase__ = 5_0
UpperCAmelCase__ = jax.device_count()
UpperCAmelCase__ = num_samples * [prompt]
UpperCAmelCase__ = pipeline.prepare_inputs(_A )
# shard inputs and rng
UpperCAmelCase__ = replicate(_A )
UpperCAmelCase__ = jax.random.split(_A , _A )
UpperCAmelCase__ = shard(_A )
images = pipeline(_A , _A , _A , _A , jit=_A ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1E-3
assert np.abs((np.abs(_A , dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5E-1
def lowercase_ (self : Optional[Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
UpperCAmelCase__ = jax.device_count()
UpperCAmelCase__ = num_samples * [prompt]
UpperCAmelCase__ = jax.random.split(jax.random.PRNGKey(0 ) , _A )
UpperCAmelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=_A , )
UpperCAmelCase__ = replicate(_A )
UpperCAmelCase__ = pipeline.prepare_inputs(_A )
UpperCAmelCase__ = shard(_A )
images = pipeline(_A , _A , _A , jit=_A ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
slice = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
UpperCAmelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=_A , use_memory_efficient_attention=_A , )
UpperCAmelCase__ = replicate(_A )
UpperCAmelCase__ = pipeline.prepare_inputs(_A )
UpperCAmelCase__ = shard(_A )
images_eff = pipeline(_A , _A , _A , jit=_A ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
slice_eff = images_eff[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
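# Added note (editorial): the recurring pattern in these tests -- replicate()
# the params across devices, split one PRNG key per device, shard() the inputs,
# then call the pipeline with jit=True -- is the standard pmap data-parallel
# recipe for Flax diffusion pipelines.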
| 486
|
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowerCAmelCase :Dict = datasets.utils.logging.get_logger(__name__)
class _lowerCamelCase ( folder_based_builder.FolderBasedBuilderConfig ):
'''simple docstring'''
A_ : bool = None
A_ : bool = None
class _lowerCamelCase ( folder_based_builder.FolderBasedBuilder ):
'''simple docstring'''
A_ : Union[str, Any] = datasets.Audio()
A_ : Tuple = """audio"""
A_ : Optional[Any] = AudioFolderConfig
A_ : List[str] # definition at the bottom of the script
A_ : Any = AudioClassification(audio_column="""audio""" , label_column="""label""" )
lowerCAmelCase :List[str] = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
lowerCAmelCase :str = AUDIO_EXTENSIONS
| 561
| 0
|
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class __lowerCAmelCase ( enum.Enum ):
snake_case : Tuple = 0
snake_case : Dict = 1
snake_case : Tuple = 2
@add_end_docstrings(__a )
class __lowerCAmelCase ( __a ):
snake_case : Optional[int] = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING)
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework)
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']")
            preprocess_params["handle_long_generation"] = handle_long_generation
        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)
    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework)
        inputs["prompt_text"] = prompt_text
        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length")
                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces)
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0], skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces))
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)
        return records
| 716
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
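# Editor's note (not part of the original file): with the `_LazyModule`
# registration above, a consumer can simply write
#     from transformers.models.layoutlmv2 import LayoutLMv2Config
# and the heavy submodules (torch-, vision- or tokenizers-dependent) are only
# imported on first attribute access, keeping `import transformers` cheap.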
| 156
| 0
|
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    '''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)
        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")
        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)
            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)
        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""
    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)
    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
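if __name__ == "__main__":
    # Editor's sketch (not part of the original metric file): a quick numeric
    # check of the unbiased pass@k estimator above. With n=5 samples of which
    # c=2 pass, pass@1 is exactly c/n = 0.4, and pass@3 = 1 - C(3,3)/C(5,3) = 0.9.
    print(estimate_pass_at_k([5], [2], 1))  # -> [0.4]
    print(estimate_pass_at_k([5], [2], 3))  # -> [0.9]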
| 7
|
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    """simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )
        outputs = model(input_ids)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )
        outputs = model(input_ids, attention_mask=attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)
    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)
    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask)
@tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)
        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)
                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)
                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
@tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
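# Editor's note (not part of the original file): the tests decorated with
# @tooslow / @is_pt_flax_cross_test above are skipped by default; in the
# transformers test suite slow tests are typically enabled with something like
#     RUN_SLOW=1 python -m pytest tests/models/gptj/test_modeling_flax_gptj.py
# (the exact test path is illustrative).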
| 140
| 0
|
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution() -> int:
    script_dir = os.path.dirname(os.path.realpath(__file__))
    wordfile_path = os.path.join(script_dir, "words.txt")
    words = ""
    with open(wordfile_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
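# Editor's note (not part of the original solution): a worked example of the
# word-value rule used above. ord("S") - 64 == 19, so
#     "SKY" -> 19 + 11 + 25 == 55 == 10 * 11 // 2  (the 10th triangular number)
# which is why each word's letter sum is looked up in TRIANGULAR_NUMBERS.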
| 507
|
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")
class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        """simple docstring"""
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []
    def __len__(self) -> int:
        """simple docstring"""
        return len(self._stack1) + len(self._stack2)
    def __repr__(self) -> str:
        """simple docstring"""
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"
    def put(self, item: _T) -> None:
        """simple docstring"""
        self._stack1.append(item)
    def get(self) -> _T:
        """simple docstring"""
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
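    # Editor's sketch (not in the original file): basic FIFO behaviour and the
    # amortized O(1) two-stack transfer.
    queue = QueueByTwoStacks([1, 2, 3])
    queue.put(4)
    assert queue.get() == 1  # first in, first out
    assert len(queue) == 3
    print(queue)  # Queue((2, 3, 4))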
| 507
| 1
|
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}
BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "
def get_pairs(word):
    """Returns the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)
    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")
        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding.")
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return (vocab_file, merges_file)
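# Editor's note (not part of the original module): `convert_tokens_to_string`
# above implements the "@@ " continuation convention -- BPE sub-tokens carry a
# trailing "@@" marker that is removed when pieces are glued back into words:
#     ["hel@@", "lo", "wor@@", "ld"]  ->  "hello world"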
| 368
|
'''simple docstring'''
from math import log2
def get_lowest_set_bit_index(number: int) -> int:
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    return 0 if number == 0 else int(log2(number & -number))
if __name__ == "__main__":
import doctest
doctest.testmod()
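    # Editor's sketch (not in the original file; the function name above is the
    # editor's choice for the previously mangled identifier): 36 == 0b100100,
    # and 36 & -36 == 4 == 1 << 2, so the lowest set bit sits at index 2.
    assert get_lowest_set_bit_index(36) == 2
    assert get_lowest_set_bit_index(1) == 0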
| 368
| 1
|
def stooge_sort(arr: list) -> list:
    '''simple docstring'''
    stooge(arr, 0, len(arr) - 1)
    return arr
def stooge(arr: list, i: int, h: int) -> None:
    '''simple docstring'''
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (int)((h - i + 1) / 3)
        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))
        # Recursively sort last 2/3 elements
        stooge(arr, i + t, (h))
        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
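    # Editor's sketch (not in the original script): a fixed self-check. Stooge
    # sort is a deliberately inefficient teaching algorithm, running in
    # O(n^(log 3 / log 1.5)) ~ O(n^2.71) time.
    assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]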
| 362
|
import argparse
import copy
def generate_neighbours(path):
    '''simple docstring'''
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    '''simple docstring'''
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    '''simple docstring'''
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    '''simple docstring'''
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    '''simple docstring'''
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size)
    print(f'Best solution: {best_sol}, with total distance: {best_cost}.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
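# Editor's note (not part of the original script): the input file is expected
# to be a whitespace-separated edge list, one undirected edge per line with an
# integer weight, e.g.
#     a b 20
#     a c 18
#     b c 10
# after which the script could be invoked as, for instance,
#     python tabu_search.py -f tabudata.txt -i 4 -s 3
# (file name and parameter values here are illustrative only).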
| 362
| 1
|