code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Any =(DEISMultistepScheduler,)
__UpperCAmelCase : Optional[int] =(("""num_inference_steps""", 2_5),)
def snake_case ( self , **__a ):
__lowerCAmelCase = {
"num_train_timesteps": 10_00,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"solver_order": 2,
}
config.update(**__a )
return config
def snake_case ( self , __a=0 , **__a ):
__lowerCAmelCase = dict(self.forward_default_kwargs )
__lowerCAmelCase = kwargs.pop("num_inference_steps" , __a )
__lowerCAmelCase = self.dummy_sample
__lowerCAmelCase = 0.1 * sample
__lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase = self.get_scheduler_config(**__a )
__lowerCAmelCase = scheduler_class(**__a )
scheduler.set_timesteps(__a )
# copy over dummy past residuals
__lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__a )
__lowerCAmelCase = scheduler_class.from_pretrained(__a )
new_scheduler.set_timesteps(__a )
# copy over dummy past residuals
__lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase , __lowerCAmelCase = sample, sample
for t in range(__a , time_step + scheduler.config.solver_order + 1 ):
__lowerCAmelCase = scheduler.step(__a , __a , __a , **__a ).prev_sample
__lowerCAmelCase = new_scheduler.step(__a , __a , __a , **__a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case ( self ):
pass
def snake_case ( self , __a=0 , **__a ):
__lowerCAmelCase = dict(self.forward_default_kwargs )
__lowerCAmelCase = kwargs.pop("num_inference_steps" , __a )
__lowerCAmelCase = self.dummy_sample
__lowerCAmelCase = 0.1 * sample
__lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**__a )
scheduler.set_timesteps(__a )
# copy over dummy past residuals (must be after setting timesteps)
__lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__a )
__lowerCAmelCase = scheduler_class.from_pretrained(__a )
# copy over dummy past residuals
new_scheduler.set_timesteps(__a )
# copy over dummy past residual (must be after setting timesteps)
__lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase = scheduler.step(__a , __a , __a , **__a ).prev_sample
__lowerCAmelCase = new_scheduler.step(__a , __a , __a , **__a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case ( self , __a=None , **__a ):
if scheduler is None:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config(**__a )
__lowerCAmelCase = scheduler_class(**__a )
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config(**__a )
__lowerCAmelCase = scheduler_class(**__a )
__lowerCAmelCase = 10
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(__a )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase = model(__a , __a )
__lowerCAmelCase = scheduler.step(__a , __a , __a ).prev_sample
return sample
def snake_case ( self ):
__lowerCAmelCase = dict(self.forward_default_kwargs )
__lowerCAmelCase = kwargs.pop("num_inference_steps" , __a )
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**__a )
__lowerCAmelCase = self.dummy_sample
__lowerCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(__a , "set_timesteps" ):
scheduler.set_timesteps(__a )
elif num_inference_steps is not None and not hasattr(__a , "set_timesteps" ):
__lowerCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
__lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
__lowerCAmelCase = scheduler.timesteps[5]
__lowerCAmelCase = scheduler.timesteps[6]
__lowerCAmelCase = scheduler.step(__a , __a , __a , **__a ).prev_sample
__lowerCAmelCase = scheduler.step(__a , __a , __a , **__a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case ( self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
__lowerCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() )
__lowerCAmelCase = self.full_loop(scheduler=__a )
__lowerCAmelCase = torch.mean(torch.abs(__a ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
__lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase = self.full_loop(scheduler=__a )
__lowerCAmelCase = torch.mean(torch.abs(__a ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def snake_case ( self ):
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=__a )
def snake_case ( self ):
self.check_over_configs(thresholding=__a )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__a , prediction_type=__a , sample_max_value=__a , algorithm_type="deis" , solver_order=__a , solver_type=__a , )
def snake_case ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def snake_case ( self ):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__a , solver_type=__a , prediction_type=__a , algorithm_type=__a , )
__lowerCAmelCase = self.full_loop(
solver_order=__a , solver_type=__a , prediction_type=__a , algorithm_type=__a , )
assert not torch.isnan(__a ).any(), "Samples have nan numbers"
def snake_case ( self ):
self.check_over_configs(lower_order_final=__a )
self.check_over_configs(lower_order_final=__a )
def snake_case ( self ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=__a , time_step=0 )
def snake_case ( self ):
__lowerCAmelCase = self.full_loop()
__lowerCAmelCase = torch.mean(torch.abs(__a ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def snake_case ( self ):
__lowerCAmelCase = self.full_loop(prediction_type="v_prediction" )
__lowerCAmelCase = torch.mean(torch.abs(__a ) )
assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3
def snake_case ( self ):
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config(thresholding=__a , dynamic_thresholding_ratio=0 )
__lowerCAmelCase = scheduler_class(**__a )
__lowerCAmelCase = 10
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(__a )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase = model(__a , __a )
__lowerCAmelCase = scheduler.step(__a , __a , __a ).prev_sample
assert sample.dtype == torch.floataa
| 57
|
import requests
from bsa import BeautifulSoup
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
"""simple docstring"""
A__ = BeautifulSoup(requests.get(lowercase_ , params=lowercase_ ).content , '''html.parser''' )
A__ = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
A__ = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
return anchors[2].get_text()
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 14
| 0
|
'''simple docstring'''
A = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def lowerCAmelCase__ ( lowerCamelCase : Dict ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[str] ):
assert len(str(_UpperCAmelCase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
_A : Optional[int] = year // 100
_A : Any = (5 * (century % 4) + 2) % 7
_A : str = year % 100
_A : int = centurian % 12
_A : Dict = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
_A : str = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0)
else DOOMSDAY_LEAP[month - 1]
)
_A : List[Any] = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
A : Tuple = logging.get_logger(__name__)
A : Tuple = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class __lowerCamelCase ( a_ ):
"""simple docstring"""
a = "longformer"
def __init__( self : Any , SCREAMING_SNAKE_CASE : Union[List[int], int] = 512 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 30522 , SCREAMING_SNAKE_CASE : int = 768 , SCREAMING_SNAKE_CASE : int = 12 , SCREAMING_SNAKE_CASE : int = 12 , SCREAMING_SNAKE_CASE : int = 3072 , SCREAMING_SNAKE_CASE : str = "gelu" , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : float = 1e-12 , SCREAMING_SNAKE_CASE : bool = False , **SCREAMING_SNAKE_CASE : List[Any] , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
_A : List[Any] = attention_window
_A : int = sep_token_id
_A : Tuple = bos_token_id
_A : Any = eos_token_id
_A : List[str] = vocab_size
_A : Any = hidden_size
_A : Optional[int] = num_hidden_layers
_A : int = num_attention_heads
_A : Dict = hidden_act
_A : List[Any] = intermediate_size
_A : int = hidden_dropout_prob
_A : Optional[int] = attention_probs_dropout_prob
_A : int = max_position_embeddings
_A : Any = type_vocab_size
_A : Dict = initializer_range
_A : Any = layer_norm_eps
_A : List[Any] = onnx_export
class __lowerCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : "PretrainedConfig" , SCREAMING_SNAKE_CASE : str = "default" , SCREAMING_SNAKE_CASE : "List[PatchingSpec]" = None):
super().__init__(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_A : Optional[Any] = True
@property
def A ( self : List[str]):
if self.task == "multiple-choice":
_A : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_A : List[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
])
@property
def A ( self : str):
_A : int = super().outputs
if self.task == "default":
_A : str = {0: 'batch'}
return outputs
@property
def A ( self : List[Any]):
return 1e-4
@property
def A ( self : Dict):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14)
def A ( self : str , SCREAMING_SNAKE_CASE : "PreTrainedTokenizerBase" , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ):
_A : Union[str, Any] = super().generate_dummy_inputs(
preprocessor=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE)
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
_A : Tuple = torch.zeros_like(inputs['input_ids'])
# make every second token global
_A : Dict = 1
return inputs
| 227
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowerCAmelCase__ : List[str] = logging.get_logger(__name__)
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = ["pixel_values"]
def __init__( self : Tuple ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[Dict[str, int]] = None ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Union[int, float] = 1 / 255 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,**lowerCamelCase__ : List[str] ,):
super().__init__(**lowerCamelCase__ )
UpperCAmelCase__ = size if size is not None else {'shortest_edge': 256}
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
UpperCAmelCase__ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,param_name='crop_size' )
UpperCAmelCase__ = do_resize
UpperCAmelCase__ = size
UpperCAmelCase__ = resample
UpperCAmelCase__ = do_center_crop
UpperCAmelCase__ = crop_size
UpperCAmelCase__ = do_rescale
UpperCAmelCase__ = rescale_factor
UpperCAmelCase__ = do_normalize
UpperCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : int ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Dict[str, int] ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Tuple ,):
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
UpperCAmelCase__ = get_resize_output_image_size(lowerCamelCase__ ,size=size['shortest_edge'] ,default_to_square=lowerCamelCase__ )
return resize(lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Dict[str, int] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Tuple ,):
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}''' )
return center_crop(lowerCamelCase__ ,size=(size['height'], size['width']) ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : float ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : int ):
return rescale(lowerCamelCase__ ,scale=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Union[float, List[float]] ,lowerCamelCase__ : Union[float, List[float]] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : List[Any] ,):
return normalize(lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : ImageInput ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : PILImageResampling = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[float] = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,lowerCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**lowerCamelCase__ : Union[str, Any] ,):
UpperCAmelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ = size if size is not None else self.size
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
UpperCAmelCase__ = resample if resample is not None else self.resample
UpperCAmelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,param_name='crop_size' )
UpperCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ = image_std if image_std is not None else self.image_std
UpperCAmelCase__ = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
UpperCAmelCase__ = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
UpperCAmelCase__ = [self.resize(image=lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ) for image in images]
if do_center_crop:
UpperCAmelCase__ = [self.center_crop(image=lowerCamelCase__ ,size=lowerCamelCase__ ) for image in images]
if do_rescale:
UpperCAmelCase__ = [self.rescale(image=lowerCamelCase__ ,scale=lowerCamelCase__ ) for image in images]
if do_normalize:
UpperCAmelCase__ = [self.normalize(image=lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ ) for image in images]
UpperCAmelCase__ = [to_channel_dimension_format(lowerCamelCase__ ,lowerCamelCase__ ) for image in images]
UpperCAmelCase__ = {'pixel_values': images}
return BatchFeature(data=lowerCamelCase__ ,tensor_type=lowerCamelCase__ )
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[Tuple] = None ):
UpperCAmelCase__ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(lowerCamelCase__ ):
UpperCAmelCase__ = target_sizes.numpy()
UpperCAmelCase__ = []
for idx in range(len(lowerCamelCase__ ) ):
UpperCAmelCase__ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=lowerCamelCase__ )
UpperCAmelCase__ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCamelCase__ )
else:
UpperCAmelCase__ = logits.argmax(dim=1 )
UpperCAmelCase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 98
|
"""simple docstring"""
def a_ ( lowerCamelCase , lowerCamelCase ):
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
UpperCAmelCase__ = str(bin(lowerCamelCase ) )[2:] # remove the leading "0b"
UpperCAmelCase__ = str(bin(lowerCamelCase ) )[2:] # remove the leading "0b"
UpperCAmelCase__ = max(len(lowerCamelCase ) , len(lowerCamelCase ) )
return "0b" + "".join(
str(int(char_a == '1' and char_b == '1' ) )
for char_a, char_b in zip(a_binary.zfill(lowerCamelCase ) , b_binary.zfill(lowerCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _lowercase ( __a ):
"""simple docstring"""
lowercase__ = '''wavlm'''
def __init__( self : Optional[Any] , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : List[str]=768 , UpperCamelCase__ : Any=12 , UpperCamelCase__ : Dict=12 , UpperCamelCase__ : Any=3072 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[Any]=0.0 , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Optional[Any]=1E-5 , UpperCamelCase__ : Optional[int]="group" , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , UpperCamelCase__ : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase__ : Union[str, Any]=(10, 3, 3, 3, 3, 2, 2) , UpperCamelCase__ : int=False , UpperCamelCase__ : List[Any]=128 , UpperCamelCase__ : Dict=16 , UpperCamelCase__ : List[str]=320 , UpperCamelCase__ : Any=800 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : str=0.05 , UpperCamelCase__ : List[Any]=10 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Union[str, Any]=0.0 , UpperCamelCase__ : Union[str, Any]=10 , UpperCamelCase__ : int=320 , UpperCamelCase__ : int=2 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Optional[Any]=100 , UpperCamelCase__ : Any=256 , UpperCamelCase__ : Union[str, Any]=256 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Optional[Any]="mean" , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Tuple=256 , UpperCamelCase__ : int=(512, 512, 512, 512, 1500) , UpperCamelCase__ : int=(5, 3, 3, 1, 1) , UpperCamelCase__ : List[str]=(1, 2, 3, 1, 1) , UpperCamelCase__ : Union[str, Any]=512 , UpperCamelCase__ : Union[str, Any]=80 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : str=2 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=3 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : Union[str, Any]=None , **UpperCamelCase__ : List[str] , ) -> Dict:
'''simple docstring'''
super().__init__(**_SCREAMING_SNAKE_CASE , pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
__UpperCamelCase =hidden_size
__UpperCamelCase =feat_extract_norm
__UpperCamelCase =feat_extract_activation
__UpperCamelCase =list(_SCREAMING_SNAKE_CASE )
__UpperCamelCase =list(_SCREAMING_SNAKE_CASE )
__UpperCamelCase =list(_SCREAMING_SNAKE_CASE )
__UpperCamelCase =conv_bias
__UpperCamelCase =num_buckets
__UpperCamelCase =max_bucket_distance
__UpperCamelCase =num_conv_pos_embeddings
__UpperCamelCase =num_conv_pos_embedding_groups
__UpperCamelCase =len(self.conv_dim )
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =num_attention_heads
__UpperCamelCase =hidden_dropout
__UpperCamelCase =attention_dropout
__UpperCamelCase =activation_dropout
__UpperCamelCase =feat_proj_dropout
__UpperCamelCase =final_dropout
__UpperCamelCase =layerdrop
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =initializer_range
__UpperCamelCase =num_ctc_classes
__UpperCamelCase =vocab_size
__UpperCamelCase =do_stable_layer_norm
__UpperCamelCase =use_weighted_layer_sum
__UpperCamelCase =classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCamelCase =apply_spec_augment
__UpperCamelCase =mask_time_prob
__UpperCamelCase =mask_time_length
__UpperCamelCase =mask_time_min_masks
__UpperCamelCase =mask_feature_prob
__UpperCamelCase =mask_feature_length
# parameters for pretraining with codevector quantized representations
__UpperCamelCase =num_codevectors_per_group
__UpperCamelCase =num_codevector_groups
__UpperCamelCase =contrastive_logits_temperature
__UpperCamelCase =num_negatives
__UpperCamelCase =codevector_dim
__UpperCamelCase =proj_codevector_dim
__UpperCamelCase =diversity_loss_weight
# ctc loss
__UpperCamelCase =ctc_loss_reduction
__UpperCamelCase =ctc_zero_infinity
# adapter
__UpperCamelCase =add_adapter
__UpperCamelCase =adapter_kernel_size
__UpperCamelCase =adapter_stride
__UpperCamelCase =num_adapter_layers
__UpperCamelCase =output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__UpperCamelCase =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__UpperCamelCase =list(_SCREAMING_SNAKE_CASE )
__UpperCamelCase =list(_SCREAMING_SNAKE_CASE )
__UpperCamelCase =list(_SCREAMING_SNAKE_CASE )
__UpperCamelCase =xvector_output_dim
@property
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 371
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__lowercase = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 85
| 0
|
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__UpperCAmelCase ="pt"
elif is_tf_available():
__UpperCAmelCase ="tf"
else:
__UpperCAmelCase ="jax"
class a__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCamelCase : Union[str, Any] =PerceiverTokenizer
lowerCamelCase : Tuple =False
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
super().setUp()
__lowerCamelCase = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
def SCREAMING_SNAKE_CASE__ ( self : Dict , **a : str ):
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname , **a )
def SCREAMING_SNAKE_CASE__ ( self : Any , a : str , a : Any=False , a : Any=20 , a : Union[str, Any]=5 ):
"""simple docstring"""
__lowerCamelCase = []
for i in range(len(a ) ):
try:
__lowerCamelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=a )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__lowerCamelCase = list(filter(lambda a : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , a ) )
__lowerCamelCase = list(filter(lambda a : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=a ) , a ) )
if max_length is not None and len(a ) > max_length:
__lowerCamelCase = toks[:max_length]
if min_length is not None and len(a ) < min_length and len(a ) > 0:
while len(a ) < min_length:
__lowerCamelCase = toks + toks
# toks_str = [t[1] for t in toks]
__lowerCamelCase = [t[0] for t in toks]
# Ensure consistency
__lowerCamelCase = tokenizer.decode(a , clean_up_tokenization_spaces=a )
if " " not in output_txt and len(a ) > 1:
__lowerCamelCase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=a )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=a )
)
if with_prefix_space:
__lowerCamelCase = ''' ''' + output_txt
__lowerCamelCase = tokenizer.encode(a , add_special_tokens=a )
return output_txt, output_ids
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
__lowerCamelCase = self.perceiver_tokenizer
__lowerCamelCase = '''Unicode €.'''
__lowerCamelCase = tokenizer(a )
__lowerCamelCase = [4, 91, 1_16, 1_11, 1_05, 1_17, 1_06, 1_07, 38, 2_32, 1_36, 1_78, 52, 5]
self.assertEqual(encoded['''input_ids'''] , a )
# decoding
__lowerCamelCase = tokenizer.decode(a )
self.assertEqual(a , '''[CLS]Unicode €.[SEP]''' )
__lowerCamelCase = tokenizer('''e è é ê ë''' )
__lowerCamelCase = [4, 1_07, 38, 2_01, 1_74, 38, 2_01, 1_75, 38, 2_01, 1_76, 38, 2_01, 1_77, 5]
self.assertEqual(encoded['''input_ids'''] , a )
# decoding
__lowerCamelCase = tokenizer.decode(a )
self.assertEqual(a , '''[CLS]e è é ê ë[SEP]''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = self.perceiver_tokenizer
__lowerCamelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
__lowerCamelCase = [4, 71, 38, 1_14, 1_17, 1_16, 1_09, 38, 1_18, 1_03, 1_20, 1_03, 1_09, 1_20, 1_03, 1_18, 1_10, 38, 1_08, 1_17, 1_20, 38, 1_21, 1_23, 1_15, 1_15, 1_03, 1_20, 1_11, 1_28, 1_03, 1_22, 1_11, 1_17, 1_16, 52, 5, 0]
# fmt: on
__lowerCamelCase = tokenizer(a , padding=a , return_tensors=a )
self.assertIsInstance(a , a )
if FRAMEWORK != "jax":
__lowerCamelCase = list(batch.input_ids.numpy()[0] )
else:
__lowerCamelCase = list(batch.input_ids.tolist()[0] )
self.assertListEqual(a , a )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = self.perceiver_tokenizer
__lowerCamelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__lowerCamelCase = tokenizer(a , padding=a , return_tensors=a )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , a )
self.assertIn('''attention_mask''' , a )
self.assertNotIn('''decoder_input_ids''' , a )
self.assertNotIn('''decoder_attention_mask''' , a )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = self.perceiver_tokenizer
__lowerCamelCase = [
'''Summary of the text.''',
'''Another summary.''',
]
__lowerCamelCase = tokenizer(
text_target=a , max_length=32 , padding='''max_length''' , truncation=a , return_tensors=a )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__lowerCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = ''' He is very happy, UNwant\u00E9d,running'''
__lowerCamelCase = tokenizer.encode(a , add_special_tokens=a )
tokenizer.save_pretrained(a )
__lowerCamelCase = tokenizer.__class__.from_pretrained(a )
__lowerCamelCase = after_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
shutil.rmtree(a )
__lowerCamelCase = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
__lowerCamelCase = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
__lowerCamelCase = tokenizer.encode(a , add_special_tokens=a )
tokenizer.save_pretrained(a )
__lowerCamelCase = tokenizer.__class__.from_pretrained(a )
__lowerCamelCase = after_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__lowerCamelCase = tokenizer.__class__.from_pretrained(a , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(a )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a )
with open(os.path.join(a , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
__lowerCamelCase = json.load(a )
with open(os.path.join(a , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
__lowerCamelCase = json.load(a )
__lowerCamelCase = [f"""<extra_id_{i}>""" for i in range(1_25 )]
__lowerCamelCase = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
__lowerCamelCase = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(a , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(a , a )
with open(os.path.join(a , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(a , a )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__lowerCamelCase = tokenizer_class.from_pretrained(
a , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__lowerCamelCase = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=a )]
__lowerCamelCase = tokenizer_class.from_pretrained(
a , additional_special_tokens=a , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_78] ) , '''�''' )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizers(fast=a , do_lower_case=a )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
__lowerCamelCase = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
__lowerCamelCase = tokenizer.convert_tokens_to_string(a )
self.assertIsInstance(a , a )
| 67
|
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Dict ="M-CLIP"
def __init__( self : Tuple , a : Optional[int]=10_24 , a : Tuple=7_68 , **a : List[str] ):
"""simple docstring"""
__lowerCamelCase = transformerDimSize
__lowerCamelCase = imageDimSize
super().__init__(**a )
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Optional[Any] =MCLIPConfig
def __init__( self : str , a : List[Any] , *a : Dict , **a : str ):
"""simple docstring"""
super().__init__(a , *a , **a )
__lowerCamelCase = XLMRobertaModel(a )
__lowerCamelCase = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : int , a : List[Any] ):
"""simple docstring"""
__lowerCamelCase = self.transformer(input_ids=a , attention_mask=a )[0]
__lowerCamelCase = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(a ), embs
| 67
| 1
|
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'EncodecFeatureExtractor'
SCREAMING_SNAKE_CASE = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[str]) -> Optional[Any]:
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = self.feature_extractor
__lowerCAmelCase : Any = False
def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: Any=None , _SCREAMING_SNAKE_CASE: Any=None , _SCREAMING_SNAKE_CASE: Any=True) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.get_decoder_prompt_ids(task=_SCREAMING_SNAKE_CASE , language=_SCREAMING_SNAKE_CASE , no_timestamps=_SCREAMING_SNAKE_CASE)
def __call__( self: Optional[Any] , *_SCREAMING_SNAKE_CASE: Union[str, Any] , **_SCREAMING_SNAKE_CASE: Tuple) -> Optional[Any]:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = kwargs.pop("audio" , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = kwargs.pop("sampling_rate" , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = kwargs.pop("text" , _SCREAMING_SNAKE_CASE)
if len(_SCREAMING_SNAKE_CASE) > 0:
__lowerCAmelCase : List[str] = args[0]
__lowerCAmelCase : str = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if text is not None:
__lowerCAmelCase : int = self.tokenizer(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
if audio is not None:
__lowerCAmelCase : Tuple = self.feature_extractor(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , sampling_rate=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__lowerCAmelCase : Optional[Any] = audio_inputs["input_values"]
if "padding_mask" in audio_inputs:
__lowerCAmelCase : Dict = audio_inputs["padding_mask"]
return inputs
def _SCREAMING_SNAKE_CASE ( self: str , *_SCREAMING_SNAKE_CASE: str , **_SCREAMING_SNAKE_CASE: Any) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Dict = kwargs.pop("audio" , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = kwargs.pop("padding_mask" , _SCREAMING_SNAKE_CASE)
if len(_SCREAMING_SNAKE_CASE) > 0:
__lowerCAmelCase : List[Any] = args[0]
__lowerCAmelCase : str = args[1:]
if audio_values is not None:
return self._decode_audio(_SCREAMING_SNAKE_CASE , padding_mask=_SCREAMING_SNAKE_CASE)
else:
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[str] , *_SCREAMING_SNAKE_CASE: List[Any] , **_SCREAMING_SNAKE_CASE: List[Any]) -> str:
"""simple docstring"""
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional = None) -> List[np.ndarray]:
"""simple docstring"""
__lowerCAmelCase : Tuple = to_numpy(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = audio_values.shape
if padding_mask is None:
return list(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = to_numpy(_SCREAMING_SNAKE_CASE)
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__lowerCAmelCase : Optional[int] = seq_len - padding_mask.shape[-1]
__lowerCAmelCase : Optional[Any] = 1 - self.feature_extractor.padding_value
__lowerCAmelCase : Dict = np.pad(_SCREAMING_SNAKE_CASE , ((0, 0), (0, difference)) , "constant" , constant_values=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = audio_values.tolist()
for i in range(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Dict = np.asarray(audio_values[i])[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__lowerCAmelCase : Optional[int] = sliced_audio.reshape(_SCREAMING_SNAKE_CASE , -1)
return audio_values
| 58
|
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__snake_case : str = logging.get_logger(__name__)
# General docstring
__snake_case : Optional[int] = 'PoolFormerConfig'
# Base docstring
__snake_case : Any = 'sail/poolformer_s12'
__snake_case : Optional[Any] = [1, 512, 7, 7]
# Image classification docstring
__snake_case : List[Any] = 'sail/poolformer_s12'
__snake_case : Optional[Any] = 'tabby, tabby cat'
__snake_case : Union[str, Any] = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def _lowercase ( __snake_case ,__snake_case = 0.0 ,__snake_case = False ) -> Tuple:
if drop_prob == 0.0 or not training:
return input
__lowerCAmelCase : Optional[int] = 1 - drop_prob
__lowerCAmelCase : Union[str, Any] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
__lowerCAmelCase : List[str] = keep_prob + torch.rand(__snake_case ,dtype=input.dtype ,device=input.device )
random_tensor.floor_() # binarize
__lowerCAmelCase : Tuple = input.div(__snake_case ) * random_tensor
return output
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[float] = None) -> None:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : Dict = drop_prob
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: torch.Tensor) -> torch.Tensor:
"""simple docstring"""
return drop_path(_SCREAMING_SNAKE_CASE , self.drop_prob , self.training)
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> str:
"""simple docstring"""
return "p={}".format(self.drop_prob)
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Any=None) -> int:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : Optional[int] = patch_size if isinstance(_SCREAMING_SNAKE_CASE , collections.abc.Iterable) else (patch_size, patch_size)
__lowerCAmelCase : Any = stride if isinstance(_SCREAMING_SNAKE_CASE , collections.abc.Iterable) else (stride, stride)
__lowerCAmelCase : Any = padding if isinstance(_SCREAMING_SNAKE_CASE , collections.abc.Iterable) else (padding, padding)
__lowerCAmelCase : Optional[int] = nn.Convad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , kernel_size=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = norm_layer(_SCREAMING_SNAKE_CASE) if norm_layer else nn.Identity()
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[int]) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : str = self.projection(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = self.norm(_SCREAMING_SNAKE_CASE)
return embeddings
class A__ ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self: str , _SCREAMING_SNAKE_CASE: List[Any] , **_SCREAMING_SNAKE_CASE: List[Any]) -> Tuple:
"""simple docstring"""
super().__init__(1 , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: int , _SCREAMING_SNAKE_CASE: str) -> Dict:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : Dict = nn.AvgPoolad(_SCREAMING_SNAKE_CASE , stride=1 , padding=pool_size // 2 , count_include_pad=_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Dict) -> Dict:
"""simple docstring"""
return self.pool(_SCREAMING_SNAKE_CASE) - hidden_states
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: str) -> Dict:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : Dict = nn.Convad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1)
__lowerCAmelCase : Tuple = nn.Convad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1)
__lowerCAmelCase : Any = PoolFormerDropPath(_SCREAMING_SNAKE_CASE)
if isinstance(config.hidden_act , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Optional[int] = ACTaFN[config.hidden_act]
else:
__lowerCAmelCase : int = config.hidden_act
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: int) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : int = self.conva(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = self.act_fn(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = self.drop(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = self.conva(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = self.drop(_SCREAMING_SNAKE_CASE)
return hidden_states
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any]) -> str:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : List[str] = PoolFormerPooling(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = PoolFormerOutput(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = PoolFormerGroupNorm(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = PoolFormerGroupNorm(_SCREAMING_SNAKE_CASE)
# Useful for training neural nets
__lowerCAmelCase : Optional[int] = PoolFormerDropPath(_SCREAMING_SNAKE_CASE) if drop_path > 0.0 else nn.Identity()
__lowerCAmelCase : Union[str, Any] = config.use_layer_scale
if config.use_layer_scale:
__lowerCAmelCase : List[Any] = nn.Parameter(
config.layer_scale_init_value * torch.ones((_SCREAMING_SNAKE_CASE)) , requires_grad=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = nn.Parameter(
config.layer_scale_init_value * torch.ones((_SCREAMING_SNAKE_CASE)) , requires_grad=_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> Optional[int]:
"""simple docstring"""
if self.use_layer_scale:
__lowerCAmelCase : int = self.pooling(self.before_norm(_SCREAMING_SNAKE_CASE))
__lowerCAmelCase : List[str] = self.layer_scale_a.unsqueeze(-1).unsqueeze(-1) * pooling_output
# First residual connection
__lowerCAmelCase : Optional[Any] = hidden_states + self.drop_path(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = ()
__lowerCAmelCase : Union[str, Any] = self.output(self.after_norm(_SCREAMING_SNAKE_CASE))
__lowerCAmelCase : Dict = self.layer_scale_a.unsqueeze(-1).unsqueeze(-1) * layer_output
# Second residual connection
__lowerCAmelCase : List[str] = hidden_states + self.drop_path(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = (output,) + outputs
return outputs
else:
__lowerCAmelCase : Optional[Any] = self.drop_path(self.pooling(self.before_norm(_SCREAMING_SNAKE_CASE)))
# First residual connection
__lowerCAmelCase : Optional[Any] = pooling_output + hidden_states
__lowerCAmelCase : List[Any] = ()
# Second residual connection inside the PoolFormerOutput block
__lowerCAmelCase : Any = self.drop_path(self.output(self.after_norm(_SCREAMING_SNAKE_CASE)))
__lowerCAmelCase : str = hidden_states + layer_output
__lowerCAmelCase : List[Any] = (output,) + outputs
return outputs
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any]) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : Optional[int] = config
# stochastic depth decay rule
__lowerCAmelCase : Tuple = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths))]
# patch embeddings
__lowerCAmelCase : List[str] = []
for i in range(config.num_encoder_blocks):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ))
__lowerCAmelCase : Tuple = nn.ModuleList(_SCREAMING_SNAKE_CASE)
# Transformer blocks
__lowerCAmelCase : Union[str, Any] = []
__lowerCAmelCase : Any = 0
for i in range(config.num_encoder_blocks):
# each block consists of layers
__lowerCAmelCase : List[Any] = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i]):
layers.append(
PoolFormerLayer(
_SCREAMING_SNAKE_CASE , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio) , drop_path=dpr[cur + j] , ))
blocks.append(nn.ModuleList(_SCREAMING_SNAKE_CASE))
__lowerCAmelCase : Union[str, Any] = nn.ModuleList(_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: str=False , _SCREAMING_SNAKE_CASE: Union[str, Any]=True) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Dict = () if output_hidden_states else None
__lowerCAmelCase : Union[str, Any] = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block)):
__lowerCAmelCase , __lowerCAmelCase : str = layers
# Get patch embeddings from hidden_states
__lowerCAmelCase : str = embedding_layer(_SCREAMING_SNAKE_CASE)
# Send the embeddings through the blocks
for _, blk in enumerate(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : int = blk(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = layer_outputs[0]
if output_hidden_states:
__lowerCAmelCase : int = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(last_hidden_state=_SCREAMING_SNAKE_CASE , hidden_states=_SCREAMING_SNAKE_CASE)
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = PoolFormerConfig
SCREAMING_SNAKE_CASE = 'poolformer'
SCREAMING_SNAKE_CASE = 'pixel_values'
SCREAMING_SNAKE_CASE = True
def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: List[Any]) -> List[str]:
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , (nn.Linear, nn.Convad)):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_SCREAMING_SNAKE_CASE , nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any]=False) -> Dict:
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : List[Any] = value
__snake_case : Union[str, Any] = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__snake_case : str = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' , __SCREAMING_SNAKE_CASE , )
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self: str , _SCREAMING_SNAKE_CASE: Optional[int]) -> Any:
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = config
__lowerCAmelCase : Any = PoolFormerEncoder(_SCREAMING_SNAKE_CASE)
# Initialize weights and apply final processing
self.post_init()
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: Optional[torch.FloatTensor] = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
"""simple docstring"""
__lowerCAmelCase : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase : str = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
__lowerCAmelCase : Union[str, Any] = self.encoder(
_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=_SCREAMING_SNAKE_CASE , hidden_states=encoder_outputs.hidden_states , )
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: Tuple) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : List[Any] = nn.Linear(config.hidden_size , config.hidden_size)
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: Optional[Any]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = self.dense(_SCREAMING_SNAKE_CASE)
return output
@add_start_docstrings(
'\n PoolFormer Model transformer with an image classification head on top\n ' , __SCREAMING_SNAKE_CASE , )
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any]) -> Dict:
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = config.num_labels
__lowerCAmelCase : Tuple = PoolFormerModel(_SCREAMING_SNAKE_CASE)
# Final norm
__lowerCAmelCase : Optional[Any] = PoolFormerGroupNorm(config.hidden_sizes[-1])
# Classifier head
__lowerCAmelCase : Any = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[torch.FloatTensor] = None , _SCREAMING_SNAKE_CASE: Optional[torch.LongTensor] = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
"""simple docstring"""
__lowerCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase : Union[str, Any] = self.poolformer(
_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = outputs[0]
__lowerCAmelCase : Optional[int] = self.classifier(self.norm(_SCREAMING_SNAKE_CASE).mean([-2, -1]))
__lowerCAmelCase : Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__lowerCAmelCase : int = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__lowerCAmelCase : List[Any] = "single_label_classification"
else:
__lowerCAmelCase : Union[str, Any] = "multi_label_classification"
if self.config.problem_type == "regression":
__lowerCAmelCase : Dict = MSELoss()
if self.num_labels == 1:
__lowerCAmelCase : Optional[int] = loss_fct(logits.squeeze() , labels.squeeze())
else:
__lowerCAmelCase : int = loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
elif self.config.problem_type == "single_label_classification":
__lowerCAmelCase : int = CrossEntropyLoss()
__lowerCAmelCase : str = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
__lowerCAmelCase : Union[str, Any] = BCEWithLogitsLoss()
__lowerCAmelCase : Optional[int] = loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
if not return_dict:
__lowerCAmelCase : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_SCREAMING_SNAKE_CASE , logits=_SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states)
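# NOTE (added usage sketch, not part of the original module): a minimal
# end-to-end example of the classification head above, assuming the public
# `transformers` API and the `sail/poolformer_s12` checkpoint. The obfuscated
# class names in this file correspond to `PoolFormerModel` and
# `PoolFormerForImageClassification` in the released library.
#
#   from transformers import AutoImageProcessor, PoolFormerForImageClassification
#   from PIL import Image
#
#   processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   logits = model(**inputs).logits  # shape: (batch_size, num_labels)
#   print(model.config.id2label[logits.argmax(-1).item()])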
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
lowercase_ = logging.get_logger(__name__)
lowercase_ = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
lowercase_ = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
lowercase_ = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
lowercase_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
lowercase_ = OrderedDict(
[
        # Model for Image Classification mapping
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
lowercase_ = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
lowercase_ = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
lowercase_ = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
lowercase_ = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
lowercase_ = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
lowercase_ = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
lowercase_ = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
lowercase_ = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
lowercase_ = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
lowercase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_MAPPING
lowercase_ = auto_class_update(FlaxAutoModel)
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING
lowercase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
lowercase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING
lowercase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowercase_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowercase_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
lowercase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowercase_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
lowercase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
lowercase_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowercase_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="""image classification"""
)
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""")
class a_ ( _BaseAutoModelClass ):
'''simple docstring'''
UpperCamelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
lowercase_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
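# NOTE (added usage sketch, not part of the original module): the lazy
# mappings above resolve a checkpoint's config type to its Flax class at call
# time, assuming the public `transformers` API with Flax/JAX installed.
#
#   from transformers import FlaxAutoModel
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")
#   # BertConfig is looked up in FLAX_MODEL_MAPPING, so `model` is a
#   # FlaxBertModel instance.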
"""simple docstring"""
def __lowercase ( _a , _a , _a=False ):
if isinstance(_a , _a ) and isinstance(_a , _a ):
snake_case_ : Union[str, Any] = len(set_a.intersection(_a ) )
if alternative_union:
snake_case_ : Any = len(_a ) + len(_a )
else:
snake_case_ : str = len(set_a.union(_a ) )
return intersection / union
if isinstance(_a , (list, tuple) ) and isinstance(_a , (list, tuple) ):
snake_case_ : str = [element for element in set_a if element in set_b]
if alternative_union:
snake_case_ : Tuple = len(_a ) + len(_a )
return len(_a ) / union
else:
snake_case_ : List[Any] = set_a + [element for element in set_b if element not in set_a]
return len(_a ) / len(_a )
return None
if __name__ == "__main__":
lowercase__ : Any = {'''a''', '''b''', '''c''', '''d''', '''e'''}
lowercase__ : Optional[Any] = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
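# NOTE (added worked check, not part of the original script): for the sets
# above, the intersection has 3 elements ({'c', 'd', 'e'}) and the union has
# 8, so the line above prints the standard Jaccard index 3 / 8 = 0.375. The
# alternative-union variant divides by len(set_a) + len(set_b) = 11 instead:
print(jaccard_similarity(set_a, set_b, alternative_union=True))  # 3 / 11, about 0.273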
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
A : List[Any] = logging.get_logger(__name__)
A : List[Any] = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
A : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def __lowerCAmelCase ( a__ ) -> Any:
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
__a = model_type_to_module_name(a__ )
__a = importlib.import_module(F""".{module_name}""" , '''transformers.models''' )
try:
return getattr(a__ , a__ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(a__ , '''__name__''' , a__ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__a = importlib.import_module('''transformers''' )
if hasattr(a__ , a__ ):
return getattr(a__ , a__ )
return None
def __lowerCAmelCase ( a__ , a__ = None , a__ = False , a__ = False , a__ = None , a__ = None , a__ = None , a__ = False , **a__ , ) -> List[Any]:
__a = get_file_from_repo(
a__ , a__ , cache_dir=a__ , force_download=a__ , resume_download=a__ , proxies=a__ , use_auth_token=a__ , revision=a__ , local_files_only=a__ , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(a__ , encoding='''utf-8''' ) as reader:
return json.load(a__ )
class __A:
def __init__( self ) -> Optional[int]:
'''simple docstring'''
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(_snake_case )
def SCREAMING_SNAKE_CASE_ ( cls , _snake_case , **_snake_case ) -> Optional[int]:
'''simple docstring'''
__a = kwargs.pop('''config''' , _snake_case )
__a = kwargs.pop('''trust_remote_code''' , _snake_case )
__a = True
__a , __a = ImageProcessingMixin.get_image_processor_dict(_snake_case , **_snake_case )
__a = config_dict.get('''image_processor_type''' , _snake_case )
__a = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
__a = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
__a = config_dict.pop('''feature_extractor_type''' , _snake_case )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
__a = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
__a = config_dict['''auto_map''']['''AutoFeatureExtractor''']
__a = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_snake_case , _snake_case ):
__a = AutoConfig.from_pretrained(_snake_case , **_snake_case )
            # It could be in `config.image_processor_type`
__a = getattr(_snake_case , '''image_processor_type''' , _snake_case )
if hasattr(_snake_case , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
__a = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
__a = image_processor_class_from_name(_snake_case )
__a = image_processor_auto_map is not None
__a = image_processor_class is not None or type(_snake_case ) in IMAGE_PROCESSOR_MAPPING
__a = resolve_trust_remote_code(
_snake_case , _snake_case , _snake_case , _snake_case )
if has_remote_code and trust_remote_code:
__a = get_class_from_dynamic_module(
_snake_case , _snake_case , **_snake_case )
__a = kwargs.pop('''code_revision''' , _snake_case )
if os.path.isdir(_snake_case ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_snake_case , **_snake_case )
elif image_processor_class is not None:
return image_processor_class.from_dict(_snake_case , **_snake_case )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_snake_case ) in IMAGE_PROCESSOR_MAPPING:
__a = IMAGE_PROCESSOR_MAPPING[type(_snake_case )]
return image_processor_class.from_dict(_snake_case , **_snake_case )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def SCREAMING_SNAKE_CASE_ ( _snake_case , _snake_case ) -> Union[str, Any]:
'''simple docstring'''
IMAGE_PROCESSOR_MAPPING.register(_snake_case , _snake_case )
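# NOTE (added usage sketch, not part of the original module): illustrates the
# resolution order implemented in `from_pretrained` above (image processor
# config, then model config, then the static mapping), assuming the public
# `transformers` API.
#
#   from transformers import AutoImageProcessor
#   # "vit" carries no remote code, so the class is taken from
#   # IMAGE_PROCESSOR_MAPPING_NAMES: ("vit", "ViTImageProcessor").
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")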
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
A : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
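# NOTE (added example, not part of the original module): with the `_LazyModule`
# indirection above, the tokenizer module (and its sentencepiece dependency) is
# only imported on first attribute access, keeping `import transformers` cheap:
#
#   from transformers.models.nllb import NllbTokenizer  # triggers the real import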
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = LDMTextToImagePipeline
_snake_case : int = TEXT_TO_IMAGE_PARAMS - {
'negative_prompt',
'negative_prompt_embeds',
'cross_attention_kwargs',
'prompt_embeds',
}
_snake_case : Tuple = PipelineTesterMixin.required_optional_params - {
'num_images_per_prompt',
'callback',
'callback_steps',
}
_snake_case : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
_snake_case : Optional[Any] = False
def snake_case__ ( self : str ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
        _UpperCamelCase = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
_UpperCamelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=A_ , set_alpha_to_one=A_ , )
torch.manual_seed(0 )
_UpperCamelCase = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , latent_channels=4 , )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_UpperCamelCase = CLIPTextModel(A_ )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_UpperCamelCase = {
"unet": unet,
"scheduler": scheduler,
"vqvae": vae,
"bert": text_encoder,
"tokenizer": tokenizer,
}
return components
def snake_case__ ( self : Tuple , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int=0 ) -> Dict:
'''simple docstring'''
if str(A_ ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(A_ )
else:
_UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
_UpperCamelCase = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def snake_case__ ( self : int ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = LDMTextToImagePipeline(**A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_UpperCamelCase = self.get_dummy_inputs(A_ )
_UpperCamelCase = pipe(**A_ ).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
_UpperCamelCase = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any=torch.floataa , lowerCAmelCase__ : int=0 ) -> Any:
'''simple docstring'''
_UpperCamelCase = torch.manual_seed(A_ )
_UpperCamelCase = np.random.RandomState(A_ ).standard_normal((1, 4, 32, 32) )
_UpperCamelCase = torch.from_numpy(A_ ).to(device=A_ , dtype=A_ )
_UpperCamelCase = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def snake_case__ ( self : Any ) -> int:
'''simple docstring'''
_UpperCamelCase = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_UpperCamelCase = self.get_inputs(A_ )
_UpperCamelCase = pipe(**A_ ).images
_UpperCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
_UpperCamelCase = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878] )
_UpperCamelCase = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : str ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int]=torch.floataa , lowerCAmelCase__ : Optional[int]=0 ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = torch.manual_seed(A_ )
_UpperCamelCase = np.random.RandomState(A_ ).standard_normal((1, 4, 32, 32) )
_UpperCamelCase = torch.from_numpy(A_ ).to(device=A_ , dtype=A_ )
_UpperCamelCase = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def snake_case__ ( self : Dict ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_UpperCamelCase = self.get_inputs(A_ )
_UpperCamelCase = pipe(**A_ ).images[0]
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy''' )
_UpperCamelCase = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
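# NOTE (added usage sketch, not part of the original test module): a minimal
# inference example for the pipeline under test, assuming the public
# `diffusers` API and access to the "CompVis/ldm-text2im-large-256" checkpoint.
#
#   from diffusers import LDMTextToImagePipeline
#   pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
#   image = pipe("A painting of a squirrel eating a burger",
#                num_inference_steps=50, guidance_scale=6.0).images[0]
#   image.save("squirrel.png")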
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowerCamelCase : str = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__lowerCamelCase : Tuple = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowerCamelCase : List[str] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__lowerCamelCase : Optional[Any] = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
__lowerCamelCase : List[str] = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def A_ ( _lowerCAmelCase ) -> List[str]:
UpperCamelCase : Optional[Any] = None
# source code of `config_class`
UpperCamelCase : Tuple = inspect.getsource(_lowerCAmelCase )
UpperCamelCase : Optional[Any] = _re_checkpoint.findall(_lowerCAmelCase )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/" ):
UpperCamelCase : Dict = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
UpperCamelCase : Any = F"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
UpperCamelCase : List[Any] = ckpt_name
break
return checkpoint
def A_ ( ) -> List[str]:
UpperCamelCase : Optional[int] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
UpperCamelCase : Union[str, Any] = get_checkpoint_from_config_class(_lowerCAmelCase )
UpperCamelCase : Optional[int] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
UpperCamelCase : Any = "\n".join(sorted(_lowerCAmelCase ) )
raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
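# NOTE (added example, not part of the original script): what `_re_checkpoint`
# captures from a config docstring:
#
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   # -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]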
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__A = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( GLPNImageProcessor ):
"""simple docstring"""
def __init__( self: Any , *__A: Union[str, Any] , **__A: Dict ) -> None:
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' , __A , )
super().__init__(*__A , **__A )
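# NOTE (added migration sketch, an assumption about typical call sites rather
# than part of this module): the deprecation above is a drop-in rename.
#
#   # before (deprecated):
#   feature_extractor = GLPNFeatureExtractor.from_pretrained("vinvino02/glpn-kitti")
#   # after:
#   image_processor = GLPNImageProcessor.from_pretrained("vinvino02/glpn-kitti")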
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: Union[str, Any] ) -> Union[str, Any]:
_A = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
_A = dict(zip(__A , range(len(__A ) ) ) )
_A = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
_A = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
_A = tempfile.mkdtemp()
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_A = os.path.join(self.tmpdirname , __A )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__A ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__A ) + '''\n''' )
# load decoder from hub
_A = '''hf-internal-testing/ngram-beam-search-decoder'''
def __A ( self: Tuple , **__A: str ) -> str:
_A = self.add_kwargs_tokens_map.copy()
kwargs.update(__A )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__A )
def __A ( self: Any , **__A: List[Any] ) -> Union[str, Any]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__A )
def __A ( self: List[Any] , **__A: Union[str, Any] ) -> int:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__A )
def __A ( self: List[str] ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def __A ( self: List[str] ) -> Optional[Any]:
_A = self.get_tokenizer()
_A = self.get_feature_extractor()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
processor.save_pretrained(self.tmpdirname )
_A = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __A )
def __A ( self: Optional[int] ) -> Union[str, Any]:
_A = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
        # load the processor with additional decoder parameters and check that they are set on the language model
_A = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __A ( self: str ) -> Any:
_A = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(__A , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=__A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __A ( self: List[str] ) -> str:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = floats_list((3, 10_00) )
_A = feature_extractor(__A , return_tensors='''np''' )
_A = processor(__A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self: Union[str, Any] ) -> Optional[Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = '''This is a test string'''
_A = processor(text=__A )
_A = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self: List[str] , __A: Optional[int]=(2, 10, 16) , __A: Optional[int]=77 ) -> List[Any]:
np.random.seed(__A )
return np.random.rand(*__A )
def __A ( self: List[Any] ) -> Optional[Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_A = processor.decode(__A )
_A = decoder.decode_beams(__A )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def __A ( self: str , __A: Any ) -> int:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_A = processor.batch_decode(__A )
else:
with get_context(__A ).Pool() as pool:
_A = processor.batch_decode(__A , __A )
_A = list(__A )
with get_context('''fork''' ).Pool() as p:
_A = decoder.decode_beams_batch(__A , __A )
_A ,_A ,_A = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__A , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(__A , decoded_processor.logit_score )
self.assertListEqual(__A , decoded_processor.lm_score )
def __A ( self: Optional[Any] ) -> int:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits()
_A = 15
_A = -20.0
_A = -4.0
_A = processor.batch_decode(
__A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , )
_A = decoded_processor_out.text
_A = list(__A )
with get_context('''fork''' ).Pool() as pool:
_A = decoder.decode_beams_batch(
__A , __A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , )
_A = [d[0][0] for d in decoded_decoder_out]
_A = [d[0][2] for d in decoded_decoder_out]
_A = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__A , __A )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , __A )
self.assertTrue(np.array_equal(__A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __A , atol=1e-3 ) )
self.assertTrue(np.array_equal(__A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , __A , atol=1e-3 ) )
def __A ( self: Optional[int] ) -> Dict:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits()
_A = 2.0
_A = 5.0
_A = -20.0
_A = True
_A = processor.batch_decode(
__A , alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , )
_A = decoded_processor_out.text
_A = list(__A )
decoder.reset_params(
alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , )
with get_context('''fork''' ).Pool() as pool:
_A = decoder.decode_beams_batch(
__A , __A , )
_A = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__A , __A )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , __A )
_A = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __A )
def __A ( self: int ) -> Optional[Any]:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_A = os.listdir(__A )
_A = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__A , __A )
def __A ( self: Tuple ) -> Any:
_A = snapshot_download('''hf-internal-testing/processor_with_lm''' )
_A = WavaVecaProcessorWithLM.from_pretrained(__A )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_A = os.listdir(__A )
_A = os.listdir(__A )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that both the decoder from the hub and the local files in cache are the same
self.assertListEqual(__A , __A )
def __A ( self: List[str] ) -> Tuple:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = floats_list((3, 10_00) )
_A = processor_wavaveca(__A , return_tensors='''np''' )
_A = processor_auto(__A , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
_A = self._get_dummy_logits()
_A = processor_wavaveca.batch_decode(__A )
_A = processor_auto.batch_decode(__A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __A ( self: Optional[int] ) -> Any:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def __A ( __A: int , __A: List[str] ) -> Union[str, Any]:
_A = [d[key] for d in offsets]
return retrieved_list
def __A ( self: Optional[Any] ) -> int:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = self._get_dummy_logits()[0]
_A = processor.decode(__A , output_word_offsets=__A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__A , __A ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def __A ( self: Optional[Any] ) -> Tuple:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = self._get_dummy_logits()
_A = processor.batch_decode(__A , output_word_offsets=__A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__A , __A ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(__A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __A ( self: Optional[Any] ) -> Optional[Any]:
import torch
_A = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=__A )
_A = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
_A = iter(__A )
_A = next(__A )
_A = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
_A = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_A = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
_A = model(__A ).logits.cpu().numpy()
_A = processor.decode(logits[0] , output_word_offsets=__A )
_A = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_A = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
_A = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(__A , '''word''' ) ) , __A )
self.assertEqual(''' '''.join(self.get_from_offsets(__A , '''word''' ) ) , output.text )
# output times
_A = torch.tensor(self.get_from_offsets(__A , '''start_time''' ) )
_A = torch.tensor(self.get_from_offsets(__A , '''end_time''' ) )
# fmt: off
_A = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
_A = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__A , __A , atol=0.01 ) )
self.assertTrue(torch.allclose(__A , __A , atol=0.01 ) )
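# NOTE (added usage sketch, not part of the original test module):
# multiprocess beam-search decoding with the public `transformers` and
# `pyctcdecode` APIs. As the comment in the pool test above notes, the pool
# must be created *after* the processor so the LM is visible to forked workers.
#
#   from multiprocessing import get_context
#   processor = Wav2Vec2ProcessorWithLM.from_pretrained(
#       "patrickvonplaten/wav2vec2-base-100h-with-lm")
#   with get_context("fork").Pool() as pool:
#       transcriptions = processor.batch_decode(logits, pool).text  # logits: (batch, time, vocab)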
import os
from datetime import datetime as dt
from github import Github
_lowerCamelCase : Dict = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def a_ ( ) -> List[Any]:
_snake_case = Github(os.environ['GITHUB_TOKEN'] )
_snake_case = g.get_repo('huggingface/diffusers' )
_snake_case = repo.get_issues(state='open' )
for issue in open_issues:
        _snake_case = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        _snake_case = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
    a_()
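# NOTE (added timeline illustration, not part of the original script): an
# issue at least 30 days old and untouched for more than 23 days first
# receives the stale notice; if nobody but the bot comments for a further 7+
# days it is closed, and any later human comment reopens it and strips the
# "stale" label.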
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # Mirrors the processor's resizing logic to compute the expected output size.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
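# Quick usage sketch of the API exercised above (same checkpoint name as in the tests;
# `image` / `target` stand in for your own inputs):
#   processor = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
#   encoding = processor(images=image, annotations=target, return_tensors="pt")
#   pixel_values, labels = encoding["pixel_values"], encoding["labels"]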
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Speed of sound in a fluid: c = sqrt(K / rho), where K is the adiabatic bulk
    modulus (Pa) and rho is the fluid density (kg/m^3).
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
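# Worked example (assumed textbook values for water: K ≈ 2.15e9 Pa, rho ≈ 998 kg/m^3):
#   speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9)  # ≈ 1467.7 m/s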
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

set_seed(770)

new_layer_name_dict = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
REMOTE_MODEL_PATHS = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()

    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict

    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]

        output_new_model_total = model(vec)

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)

        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)

        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(
    semantic_path,
    coarse_path,
    fine_path,
    append_text,
    hub_path,
    folder_path,
):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )

    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)

    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
lowerCAmelCase :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
lowerCAmelCase :Union[str, Any] = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
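# Example invocation (hypothetical script name and output path):
#   python convert_suno_to_hf.py text ./bark-text-converted --is_small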
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        num_channels: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        embed_dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        resolution: int = 2,
        dim: int = 2,
        mlp_expansion_ratio: int = 2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]), [seq_length, self.model_tester.hidden_size]
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size]
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)

        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )

    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ):
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __a ( unittest.TestCase ):
"""simple docstring"""
@property
def _lowerCAmelCase ( self : List[str] ):
torch.manual_seed(0 )
UpperCamelCase__ : Any =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
@property
def _lowerCAmelCase ( self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCamelCase__ : str =VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , )
return model
@property
def _lowerCAmelCase ( self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCamelCase__ : List[Any] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self : List[str] ):
UpperCamelCase__ : Tuple =self.dummy_uncond_unet
UpperCamelCase__ : int =DDIMScheduler()
UpperCamelCase__ : Union[str, Any] =self.dummy_vq_model
UpperCamelCase__ : str =LDMPipeline(unet=_SCREAMING_SNAKE_CASE , vqvae=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
ldm.to(_SCREAMING_SNAKE_CASE )
ldm.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict =torch.manual_seed(0 )
UpperCamelCase__ : int =ldm(generator=_SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''numpy''' ).images
UpperCamelCase__ : Optional[int] =torch.manual_seed(0 )
UpperCamelCase__ : str =ldm(generator=_SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''numpy''' , return_dict=_SCREAMING_SNAKE_CASE )[0]
UpperCamelCase__ : Tuple =image[0, -3:, -3:, -1]
UpperCamelCase__ : Dict =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] =np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
UpperCamelCase__ : Tuple =1e-2 if torch_device != '''mps''' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class __a ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[int] ):
UpperCamelCase__ : Dict =LDMPipeline.from_pretrained('''CompVis/ldm-celebahq-256''' )
ldm.to(_SCREAMING_SNAKE_CASE )
ldm.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict =torch.manual_seed(0 )
UpperCamelCase__ : int =ldm(generator=_SCREAMING_SNAKE_CASE , num_inference_steps=5 , output_type='''numpy''' ).images
UpperCamelCase__ : Optional[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCamelCase__ : List[Any] =np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
UpperCamelCase__ : List[str] =1e-2 if torch_device != '''mps''' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
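# Minimal end-to-end sketch of the pipeline exercised above (step count is illustrative):
#   ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = ldm(num_inference_steps=50, output_type="numpy").images[0]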
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs LukeConfig."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
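# Usage sketch (values for the "large" variant are illustrative, not read from a checkpoint):
#   config = LukeConfig()  # base-sized defaults as above
#   config_large = LukeConfig(hidden_size=1024, num_hidden_layers=24, num_attention_heads=16)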
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
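# Usage sketch (the checkpoint name is illustrative; `image` is a PIL.Image):
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photography of", return_tensors="pt")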
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
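# With the lazy module in place, an import such as
#   from transformers.models.altclip import AltCLIPModel
# only triggers the heavy torch-dependent module on first attribute access.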
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you are using some other dataset, put the target column here instead
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
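# The loops above build overlapping windows: each sample is `look_back`
# consecutive values, and its target is the next `forward_days` values.
# A tiny sketch of the same windowing on a toy series (illustrative only):
series = np.arange(8).reshape(-1, 1)  # [[0], [1], ..., [7]]
lb, fd = 3, 2                         # look_back, forward_days
xs = [series[i : i + lb] for i in range(len(series) - fd - lb + 1)]
ys = [series[i + lb : i + lb + fd] for i in range(len(series) - fd - lb + 1)]
# xs[0] is [[0], [1], [2]] and ys[0] is [[3], [4]]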
| 129
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
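# The hard-coded input ids above encode the commented sentence; a hedged
# reproduction sketch (assumes the sentencepiece tokenizer can be downloaded;
# exact equality of the ids is expected but not guaranteed here):
# from transformers import XLMRobertaTokenizer
# tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
# ids = tok("The dog is cute and lives in the garden house", return_tensors="pt").input_ids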
| 8
|
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")
    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)
    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
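# Round-trip sanity check for the helpers above (illustrative; kept commented
# because the main() guard above blocks on interactive input when executed):
# ciphertext = encrypt_message("LEMON", "Attack at dawn!")
# assert decrypt_message("LEMON", ciphertext) == "Attack at dawn!"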
| 233
| 0
|
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Sort a mutable collection in place using patience sort and return it."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
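# A minimal usage sketch for patience_sort (values are illustrative; kept
# commented because the guard above blocks on interactive input when executed):
# data = [1, 9, 5, 21, 17, 6]
# assert patience_sort(data) == [1, 5, 6, 9, 17, 21]
# assert data == [1, 5, 6, 9, 17, 21]  # the input list is sorted in place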
| 364
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]
    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
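# A hedged usage sketch of TranslationTool (assumes the transformers agent
# tooling is installed and the NLLB checkpoint can be downloaded; the output
# shown is illustrative):
# translator = TranslationTool()
# print(translator("Bonjour tout le monde", src_lang="French", tgt_lang="English"))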
| 317
| 0
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
SCREAMING_SNAKE_CASE : Optional[Any] = 16
SCREAMING_SNAKE_CASE : Union[str, Any] = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
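# To actually exercise the DeepSpeed code paths above, the script is typically
# launched through the accelerate CLI; a hedged invocation sketch (the config
# file name and flag values are illustrative):
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 2 --output_dir ./out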
| 21
|
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
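# A small usage sketch for the classes above (node values are illustrative):
g = DirectedGraph()
g.add_pair(0, 1)
g.add_pair(1, 2)
g.add_pair(2, 0)
print(g.all_nodes())  # [0, 1, 2]
print(g.dfs())        # depth-first order from the first node, e.g. [0, 1, 2]
print(g.has_cycle())  # True for the 0 -> 1 -> 2 -> 0 cycle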
| 320
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = 'laion/clap-htsat-unfused'
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors='np')
        input_processor = processor(audios=raw_speech, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = 'This is a test string'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names[2:], feature_extractor.model_input_names, msg='`processor` and `feature_extractor` model input names do not match', )
| 294
| 1
|
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number to contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
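# Quick check against a small case (kept commented because the guard above
# blocks on interactive input when executed): F(12) = 144 is the first
# Fibonacci number with three digits, so:
# assert solution(3) == 12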
| 227
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='''ResNet does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''ResNet does not support input and output embeddings''')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''tf''')
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
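# The slow/integration tests above are typically run through pytest; a hedged
# invocation sketch (path and flags follow the usual transformers convention
# and are illustrative):
#   RUN_SLOW=1 python -m pytest tests/models/resnet/test_modeling_tf_resnet.py -k "integration"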
| 33
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system matrix * x = vector via Gaussian elimination."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Return the polynomial of minimal degree through (1, y_1), ..., (n, y_n)."""
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size))

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the optimum polynomials for func."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
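# Sanity check on the cubic example from the problem statement: for
# u(n) = n**3 the first incorrect terms are 1, 15 and 58, summing to 74.
assert solution(lambda n: n**3, 3) == 74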
| 240
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCAmelCase :str = logging.get_logger(__name__)
__UpperCAmelCase :int = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__UpperCAmelCase :List[str] = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__UpperCAmelCase :Optional[Any] = {"facebook/blenderbot-3B": 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _a ( ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
__UpperCAmelCase : Optional[Any] = bs[:]
__UpperCAmelCase : Dict = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowercase )
cs.append(2**8 + n )
n += 1
__UpperCAmelCase : Dict = [chr(_lowercase ) for n in cs]
return dict(zip(_lowercase , _lowercase ) )
def _a ( _lowercase : str ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = set()
__UpperCAmelCase : List[str] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCAmelCase : Tuple = char
return pairs
class a ( _a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : str , snake_case : Dict , snake_case : str , snake_case : Optional[int]="replace" , snake_case : Tuple="<s>" , snake_case : Dict="</s>" , snake_case : Any="</s>" , snake_case : Any="<s>" , snake_case : List[Any]="<unk>" , snake_case : int="<pad>" , snake_case : List[Any]="<mask>" , snake_case : List[Any]=False , **snake_case : Any , ) -> Union[str, Any]:
__UpperCAmelCase : List[str] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else bos_token
__UpperCAmelCase : Union[str, Any] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else eos_token
__UpperCAmelCase : str = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else sep_token
__UpperCAmelCase : Union[str, Any] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else cls_token
__UpperCAmelCase : Tuple = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else unk_token
__UpperCAmelCase : Optional[int] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : List[str] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
super().__init__(
errors=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , **snake_case , )
with open(snake_case , encoding='''utf-8''' ) as vocab_handle:
__UpperCAmelCase : Tuple = json.load(snake_case )
__UpperCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase : List[str] = errors # how to handle errors in decoding
__UpperCAmelCase : Any = bytes_to_unicode()
__UpperCAmelCase : Optional[int] = {v: k for k, v in self.byte_encoder.items()}
with open(snake_case , encoding='''utf-8''' ) as merges_handle:
__UpperCAmelCase : str = merges_handle.read().split('''\n''' )[1:-1]
__UpperCAmelCase : int = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase : str = dict(zip(snake_case , range(len(snake_case ) ) ) )
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : str = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase : Optional[Any] = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
return len(self.encoder )
def lowerCamelCase__ ( self : Tuple ) -> int:
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase__ ( self : List[str] , snake_case : Optional[Any] ) -> int:
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : Optional[Any] = tuple(snake_case )
__UpperCAmelCase : Optional[int] = get_pairs(snake_case )
if not pairs:
return token
while True:
__UpperCAmelCase : Optional[int] = min(snake_case , key=lambda snake_case : self.bpe_ranks.get(snake_case , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = bigram
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : List[str] = 0
while i < len(snake_case ):
try:
__UpperCAmelCase : Optional[Any] = word.index(snake_case , snake_case )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : Optional[Any] = j
if word[i] == first and i < len(snake_case ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : Union[str, Any] = tuple(snake_case )
__UpperCAmelCase : Optional[Any] = new_word
if len(snake_case ) == 1:
break
else:
__UpperCAmelCase : Optional[int] = get_pairs(snake_case )
__UpperCAmelCase : str = ''' '''.join(snake_case )
__UpperCAmelCase : Optional[int] = word
return word
def lowerCamelCase__ ( self : Tuple , snake_case : Optional[Any] ) -> List[str]:
__UpperCAmelCase : List[Any] = []
for token in re.findall(self.pat , snake_case ):
__UpperCAmelCase : List[Any] = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case ).split(''' ''' ) )
return bpe_tokens
def lowerCamelCase__ ( self : Dict , snake_case : List[str] ) -> Optional[int]:
return self.encoder.get(snake_case , self.encoder.get(self.unk_token ) )
def lowerCamelCase__ ( self : Any , snake_case : int ) -> Optional[int]:
return self.decoder.get(snake_case )
def lowerCamelCase__ ( self : Optional[int] , snake_case : Tuple ) -> Union[str, Any]:
__UpperCAmelCase : Tuple = ''''''.join(snake_case )
__UpperCAmelCase : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1=None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation"):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
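# Illustrative usage sketch only (assumes the methods above belong to the slow
# BlenderbotTokenizer and that a vocab/merges pair such as the
# "facebook/blenderbot-400M-distill" checkpoint is available):
#
#     tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
#     input_ids = tokenizer(" Hello world")["input_ids"]  # ends with the eos token id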
| 240
| 1
|
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
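# For example, a nested config like {"model": {"classification": {"name": "mobilevit_v2"}}}
# is flattened by the helper above to {"model.classification.name": "mobilevit_v2"} before
# being attached to the namespace, which is why the getattr keys below are dotted paths.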
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old_key, new_key):
    val = dct.pop(old_key)
    dct[new_key] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                    )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
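# As a concrete illustration of the mapping above: with base_model=False, the original
# stem key "conv_1.block.conv.weight" becomes "mobilevitv2.conv_stem.convolution.weight".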
def remove_unused_keys(state_dict):
    # remove unused keys (e.g. the auxiliary segmentation head)
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model before loading
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""",
default="""imagenet1k_256""",
type=str,
help=(
"""Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """
"""
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
"""
),
choices=[
"""imagenet1k_256""",
"""imagenet1k_384""",
"""imagenet21k_to_1k_256""",
"""imagenet21k_to_1k_384""",
"""ade20k_deeplabv3""",
"""voc_deeplabv3""",
],
)
parser.add_argument(
"""--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 330
|
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"

    return status
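# e.g. is_prime(97) is True and is_prime(1) is False (0 and 1 are not prime).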
def sieve_er(n):
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returned.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans
def get_prime_numbers(n):
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans
def prime_factorization(number):
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"

    ans = []  # this list will be returned by the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans
def greatest_prime_factor(number):
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def smallest_prime_factor(number):
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans
def is_even(number):
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"

    return number % 2 == 0


def is_odd(number):
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"

    return number % 2 != 0
def goldbach(number):
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"

    ans = []  # this list will be returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes. And the sum of the elements must equal 'number'"

    return ans
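# e.g. goldbach(28) returns [5, 23], the first prime pair (scanning by increasing
# first element) whose sum is 28.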
def gcd(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be from type int and positive"

    return number1
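# This is the iterative Euclidean algorithm; e.g. gcd(36, 60) == 12.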
def kg_v(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."

    ans = 1  # actual answer that will be returned.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be from type int and positive"

    return ans
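# kgV is the least common multiple built from the prime factorizations;
# e.g. kg_v(24, 36) == 72 (2*2*2 * 3*3).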
def get_prime(n):
    assert isinstance(n, int) and (n >= 0), "'number' must be a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and from type int"

    return ans
def get_primes_between(p_number_1, p_number_2):
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returned.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n):
    assert isinstance(n, int) and (n >= 1), "'n' must be an int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number):
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator, denominator):
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    ans = 1  # this will be returned.

    for factor in range(1, n + 1):
        ans *= factor

    return ans
def fib(n):
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
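if __name__ == "__main__":
    # A few illustrative sanity checks for the helpers above (a sketch added for
    # clarity, not part of the original module; the values follow directly from
    # the definitions, e.g. fib() above starts its sequence 1, 1, 2, 3, 5, 8).
    assert is_prime(97) and not is_prime(100)
    assert simplify_fraction(10, 20) == (1, 2)
    assert factorial(5) == 120
    assert fib(5) == 8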
| 262
| 0
|
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 161
|
"""simple docstring"""
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
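# Classical reference for the circuit above (count keys use qiskit's `<bit1 bit0>`
# ordering, with bit0 = XOR/sum and bit1 = AND/carry): inputs (0,0) -> '00',
# (0,1) -> '01', (1,0) -> '01', (1,1) -> '10'; so half_adder(1, 1) is expected
# to measure {'10': 1000} over 1000 shots.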
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(F"""Half Adder Output Qubit Counts: {counts}""")
| 161
| 1
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")

        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

    # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 275
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
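# Worked example: for a 480x640 input, output_size=384, keep_aspect_ratio=True and
# multiple=32, the scales are 0.8 (height) and 0.6 (width); fitting the height wins
# since |1 - 0.6| >= |1 - 0.8|, both scales become 0.8, and the result is (384, 512).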
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image,
        size,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resample=PILImageResampling.BICUBIC,
        data_format=None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize=None,
        size=None,
        keep_aspect_ratio=None,
        ensure_multiple_of=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation( self , outputs , target_sizes = None ) ->Any:
        '''simple docstring'''
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                # Upsample each logit map to its target size, then take the per-pixel argmax over classes.
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 275
| 1
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class a ( unittest.TestCase , ToolTesterMixin ):
    """simple docstring"""

    def setUp( self ) -> None:
        self.tool = load_tool("""text-classification""" )
        self.tool.setup()
        self.remote_tool = load_tool("""text-classification""" , remote=True )

    def test_exact_match_arg( self ) -> None:
        result = self.tool("""That's quite cool""" , ["""positive""", """negative"""] )
        self.assertEqual(result , """positive""" )

    def test_exact_match_arg_remote( self ) -> None:
        result = self.remote_tool("""That's quite cool""" , ["""positive""", """negative"""] )
        self.assertEqual(result , """positive""" )

    def test_exact_match_kwarg( self ) -> None:
        result = self.tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] )
        self.assertEqual(result , """positive""" )

    def test_exact_match_kwarg_remote( self ) -> None:
        result = self.remote_tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] )
        self.assertEqual(result , """positive""" )
| 363
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _SCREAMING_SNAKE_CASE['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
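# Nothing above imports torch eagerly: at runtime the module object is swapped for a
# _LazyModule that resolves the names in _SCREAMING_SNAKE_CASE on first attribute access.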
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _SCREAMING_SNAKE_CASE, module_spec=__spec__)
| 81
| 0
|
"""simple docstring"""
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 78
|
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class __a (DiffusionPipeline ):
def __init__( self : str , __magic_name__ : CLIPSegForImageSegmentation , __magic_name__ : CLIPSegProcessor , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , ) -> str:
"""simple docstring"""
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
F"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
F""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
'''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
            deprecate('''steps_offset!=1''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['''steps_offset'''] = 1
            scheduler._internal_dict = FrozenDict(new_config )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
F"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
            deprecate('''skip_prk_steps not set''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['''skip_prk_steps'''] = True
            scheduler._internal_dict = FrozenDict(new_config )
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=__magic_name__ , segmentation_processor=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> List[str]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def UpperCAmelCase__ ( self : List[str] ) -> str:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
def UpperCAmelCase__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
UpperCAmelCase_ : Tuple = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(__magic_name__ , __magic_name__ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__magic_name__ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
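        # When accelerate offloading is active, each module's `_hf_hook.execution_device`
        # records where its forward pass actually runs, so the hooks are queried instead of
        # trusting `self.device` (which may be the `meta` device).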
@torch.no_grad()
def __call__( self : Union[str, Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : Union[torch.FloatTensor, PIL.Image.Image] , __magic_name__ : str , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Tuple , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
UpperCAmelCase_ : int = self.segmentation_model(**__magic_name__ )
UpperCAmelCase_ : Dict = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
UpperCAmelCase_ : List[Any] = self.numpy_to_pil(__magic_name__ )[0].resize(image.size )
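        # The resized sigmoid output of CLIPSeg is used directly as the inpainting mask:
        # brighter (higher-probability) regions are the ones that get repainted.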
# Run inpainting pipeline with the generated mask
UpperCAmelCase_ : int = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=__magic_name__ , image=__magic_name__ , mask_image=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , )
| 125
| 0
|
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester ( unittest.TestCase ):
'''simple docstring'''
    def __init__(self , parent , batch_size=2 , seq_length=56 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=7 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=2 , num_random_blocks=3 , ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self ) -> Union[str, Any]:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self ) -> Any:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """attention_mask""": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp(self ) -> None:
        """simple docstring"""
        self.model_tester = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self ) -> None:
"""simple docstring"""
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self ) -> None:
"""simple docstring"""
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self ) -> None:
"""simple docstring"""
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self ) -> None:
"""simple docstring"""
super().test_hidden_states_output()
@slow
    def test_model_from_pretrained(self ) -> None:
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""google/bigbird-roberta-base""" )
            self.assertIsNotNone(model )
    def test_attention_outputs(self ) -> None:
"""simple docstring"""
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self ) -> None:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs )

                # Run the same forward pass with and without JIT compilation; the outputs
                # must agree in length and in per-tensor shape.
                with self.subTest("""JIT Enabled""" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def check_pt_flax_outputs(self , fx_outputs , pt_outputs , model_class , tol=1E-5 , name="outputs" , attributes=None ) -> None:
        """simple docstring"""
        # Attention outputs are skipped: Flax block-sparse attention does not return
        # attention probabilities in a layout comparable to the PyTorch implementation.
        if name.startswith("""outputs.attentions""" ):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
| 146
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class A ( DiffusionPipeline ):
'''simple docstring'''
def __init__(self : List[str] , _UpperCAmelCase : AutoencoderKL , _UpperCAmelCase : CLIPTextModel , _UpperCAmelCase : CLIPTokenizer , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _UpperCAmelCase : StableDiffusionSafetyChecker , _UpperCAmelCase : CLIPImageProcessor , ) -> Dict:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , )
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Optional[Union[str, int]] = "auto" ) -> Tuple:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.enable_attention_slicing(_UpperCAmelCase )
@torch.no_grad()
def __call__(self : Any , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 50 , _UpperCAmelCase : float = 7.5 , _UpperCAmelCase : Optional[Union[str, List[str]]] = None , _UpperCAmelCase : Optional[int] = 1 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : Optional[torch.Generator] = None , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _UpperCAmelCase : int = 1 , _UpperCAmelCase : Optional[torch.FloatTensor] = None , **_UpperCAmelCase : Any , ) -> Tuple:
"""simple docstring"""
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__ = 1
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__ = len(_UpperCAmelCase )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(_UpperCAmelCase )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(_UpperCAmelCase )}.''' )
# get prompt text embeddings
lowercase__ = self.tokenizer(
_UpperCAmelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
lowercase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowercase__ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowercase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase__ , lowercase__ , lowercase__ = text_embeddings.shape
lowercase__ = text_embeddings.repeat(1 , _UpperCAmelCase , 1 )
lowercase__ = text_embeddings.view(bs_embed * num_images_per_prompt , _UpperCAmelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ = 42
if negative_prompt is None:
lowercase__ = [""""""]
elif type(_UpperCAmelCase ) is not type(_UpperCAmelCase ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(_UpperCAmelCase )} !='''
f''' {type(_UpperCAmelCase )}.''' )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__ = [negative_prompt]
elif batch_size != len(_UpperCAmelCase ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(_UpperCAmelCase )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
lowercase__ = negative_prompt
lowercase__ = text_input_ids.shape[-1]
lowercase__ = self.tokenizer(
_UpperCAmelCase , padding="""max_length""" , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" , )
lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__ = uncond_embeddings.shape[1]
lowercase__ = uncond_embeddings.repeat(_UpperCAmelCase , _UpperCAmelCase , 1 )
lowercase__ = uncond_embeddings.view(batch_size * num_images_per_prompt , _UpperCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowercase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase__ = torch.randn(
_UpperCAmelCase , generator=_UpperCAmelCase , device="""cpu""" , dtype=_UpperCAmelCase ).to(self.device )
lowercase__ = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device="""cpu""" , dtype=_UpperCAmelCase ).to(
self.device )
else:
lowercase__ = torch.randn(
_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
lowercase__ = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            latents_reference = latents_reference.to(self.device )
            latents = latents.to(self.device )
        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        # dx/dy are the (possibly negative) offsets between the two latent grids, from which
        # the copy window (tx, ty, w, h) is derived so the reference noise stays centered.
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx , 0 )
        dy = max(-dy , 0 )
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(_UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ = {}
if accepts_eta:
lowercase__ = eta
for i, t in enumerate(self.progress_bar(_UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
lowercase__ = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
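                # Classifier-free guidance: extrapolate from the unconditional prediction
                # toward the text-conditioned one, scaled by `guidance_scale`.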
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
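        # Scale latents back by the VAE scaling factor (1 / 0.18215) before decoding to pixel space.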
lowercase__ = 1 / 0.18_215 * latents
lowercase__ = self.vae.decode(_UpperCAmelCase ).sample
lowercase__ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
lowercase__ = self.feature_extractor(self.numpy_to_pil(_UpperCAmelCase ) , return_tensors="""pt""" ).to(
self.device )
lowercase__ , lowercase__ = self.safety_checker(
images=_UpperCAmelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowercase__ = None
if output_type == "pil":
lowercase__ = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=_UpperCAmelCase , nsfw_content_detected=_UpperCAmelCase )
| 146
| 1
|
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["""data_utils"""] = data_utils
sys.modules["""vocabulary"""] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch ( tf_checkpoint_path ,transfo_xl_config_file ,pytorch_dump_folder_path ,transfo_xl_dataset_file ):
    """simple docstring"""
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file ,"""rb""" ) as fp:
            corpus = pickle.load(fp ,encoding="""latin1""" )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""pretrained_vocab_file"""]
        print(f'''Save vocabulary to {pytorch_vocab_dump_path}''' )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict ,pytorch_vocab_dump_path )
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("""vocab""" ,None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + """/""" + CORPUS_NAME
        print(f'''Save dataset to {pytorch_dataset_dump_path}''' )
        torch.save(corpus_dict_no_vocab ,pytorch_dataset_dump_path )
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )
        print(f'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(f'''Building PyTorch model from configuration: {config}''' )
        model = TransfoXLLMHeadModel(config )
        model = load_tf_weights_in_transfo_xl(model ,config ,tf_path )
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path ,WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path ,CONFIG_NAME )
        print(f'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}''' )
        torch.save(model.state_dict() ,pytorch_weights_dump_path )
        print(f'''Save configuration file to {os.path.abspath(pytorch_config_dump_path )}''' )
        with open(pytorch_config_dump_path ,"""w""" ,encoding="""utf-8""" ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
UpperCAmelCase__ = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 289
|
"""simple docstring"""
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 1_2,
"""Pm""": 1_5,
"""Em""": 1_8,
"""Zm""": 2_1,
"""Ym""": 2_4,
}
def __UpperCAmelCase ( value ,from_type ,to_type ):
    """simple docstring"""
    from_sanitized = from_type.lower().strip("""s""" )
    to_sanitized = to_type.lower().strip("""s""" )
    from_sanitized = UNIT_SYMBOL.get(from_sanitized ,from_sanitized )
    to_sanitized = UNIT_SYMBOL.get(to_sanitized ,to_sanitized )
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f'''Invalid \'from_type\' value: {from_type!r}.\n'''
            f'''Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}'''
        )
        raise ValueError(msg )
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f'''Invalid \'to_type\' value: {to_type!r}.\n'''
            f'''Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}'''
        )
        raise ValueError(msg )
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    # Converting from 10**from_exponent metres to 10**to_exponent metres multiplies the
    # value by 10**(from_exponent - to_exponent).
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10 ,exponent )
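# Worked example: __UpperCAmelCase(4, "kilometer", "meter") looks up exponents 3 and 0 and
# returns 4 * 10 ** (3 - 0) == 4000.0.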
if __name__ == "__main__":
from doctest import testmod
testmod()
| 289
| 1
|
class A :
    """simple docstring"""

    def __init__( self , array )-> None:
        '''simple docstring'''
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum( self , start , end )-> int:
        '''simple docstring'''
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum( self , target_sum )-> bool:
        '''simple docstring'''
        # A contiguous subarray summing to target_sum exists iff two prefix sums differ by
        # target_sum; seeding with 0 also covers subarrays that start at index 0.
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
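# Usage sketch (hypothetical values): A([1, 2, 3]) stores prefix sums [1, 3, 6], so
# get_sum(0, 2) == 6 and contains_sum(5) is True (the subarray [2, 3]).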
| 282
|
def solution( n : int = 1000 ) -> int:
    '''simple docstring'''
    a = 3
    result = 0
    while a < n:
        # Multiples of 15 are already multiples of 3, so a single `or` test covers every case.
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
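# For the default n = 1000 this sums every multiple of 3 or 5 below 1000, giving 233168.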
if __name__ == "__main__":
print(f"""{solution() = }""")
| 282
| 1
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 10_24,
'''facebook/bart-large''': 10_24,
'''facebook/bart-large-mnli''': 10_24,
'''facebook/bart-large-cnn''': 10_24,
'''facebook/bart-large-xsum''': 10_24,
'''yjernite/bart_eli5''': 10_24,
}
@lru_cache()
def bytes_to_unicode() -> dict:
    # Map every byte to a printable unicode character: printable bytes map to themselves,
    # the rest are shifted to fresh code points above 255 so the mapping stays reversible.
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word ) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class __snake_case ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="replace" , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase="<mask>" , __UpperCamelCase=False , **__UpperCamelCase , ) -> List[str]:
'''simple docstring'''
snake_case__ : int = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else bos_token
snake_case__ : Tuple = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else eos_token
snake_case__ : Any = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else sep_token
snake_case__ : Optional[int] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else cls_token
snake_case__ : Optional[int] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else unk_token
snake_case__ : Optional[Any] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ : Tuple = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
super().__init__(
errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , **__UpperCamelCase , )
with open(__UpperCamelCase , encoding='utf-8' ) as vocab_handle:
snake_case__ : Optional[int] = json.load(__UpperCamelCase )
snake_case__ : Tuple = {v: k for k, v in self.encoder.items()}
snake_case__ : int = errors # how to handle errors in decoding
snake_case__ : Optional[int] = bytes_to_unicode()
snake_case__ : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(__UpperCamelCase , encoding='utf-8' ) as merges_handle:
snake_case__ : Optional[Any] = merges_handle.read().split('\n' )[1:-1]
snake_case__ : Optional[Any] = [tuple(merge.split() ) for merge in bpe_merges]
snake_case__ : Union[str, Any] = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
snake_case__ : Any = {}
snake_case__ : Union[str, Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case__ : str = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def __a ( self ) -> Any:
'''simple docstring'''
return len(self.encoder )
def __a ( self ) -> int:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self , __UpperCamelCase ) -> Any:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
snake_case__ : str = tuple(__UpperCamelCase )
snake_case__ : Union[str, Any] = get_pairs(__UpperCamelCase )
if not pairs:
return token
while True:
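            # Greedy BPE: repeatedly merge the adjacent symbol pair with the lowest merge
            # rank (the earliest-learned merge) until no learned pair remains in the word.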
snake_case__ : Union[str, Any] = min(__UpperCamelCase , key=lambda __UpperCamelCase : self.bpe_ranks.get(__UpperCamelCase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
snake_case__ , snake_case__ : Dict = bigram
snake_case__ : Union[str, Any] = []
snake_case__ : Dict = 0
while i < len(__UpperCamelCase ):
try:
snake_case__ : Union[str, Any] = word.index(__UpperCamelCase , __UpperCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case__ : str = j
if word[i] == first and i < len(__UpperCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case__ : Dict = tuple(__UpperCamelCase )
snake_case__ : int = new_word
if len(__UpperCamelCase ) == 1:
break
else:
snake_case__ : List[str] = get_pairs(__UpperCamelCase )
snake_case__ : int = ' '.join(__UpperCamelCase )
snake_case__ : Tuple = word
return word
def __a ( self , __UpperCamelCase ) -> Dict:
'''simple docstring'''
snake_case__ : Dict = []
for token in re.findall(self.pat , __UpperCamelCase ):
snake_case__ : Union[str, Any] = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__UpperCamelCase ).split(' ' ) )
return bpe_tokens
def __a ( self , __UpperCamelCase ) -> List[Any]:
'''simple docstring'''
return self.encoder.get(__UpperCamelCase , self.encoder.get(self.unk_token ) )
def __a ( self , __UpperCamelCase ) -> int:
'''simple docstring'''
return self.decoder.get(__UpperCamelCase )
def __a ( self , __UpperCamelCase ) -> str:
'''simple docstring'''
snake_case__ : Dict = ''.join(__UpperCamelCase )
snake_case__ : List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__UpperCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case__ : Optional[int] = os.path.join(
__UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
snake_case__ : Optional[int] = os.path.join(
__UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCamelCase , ensure_ascii=__UpperCamelCase ) + '\n' )
snake_case__ : Union[str, Any] = 0
with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
snake_case__ : Tuple = token_index
writer.write(' '.join(__UpperCamelCase ) + '\n' )
index += 1
return vocab_file, merge_file
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : Optional[int] = [self.cls_token_id]
snake_case__ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __a ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase )) + [1]
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) + [1]
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
snake_case__ : Union[str, Any] = [self.sep_token_id]
snake_case__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __a ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[Any] = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__UpperCamelCase ) > 0 and not text[0].isspace()):
snake_case__ : int = ' ' + text
return (text, kwargs)
| 143
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available('''google.colab''')
except ModuleNotFoundError:
pass
@input.register
class __snake_case :
    def __init__( self , prompt = None , choices = [] ) -> None:
        '''simple docstring'''
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = '*'
        else:
            self.arrow_char = '➔ '
def __a ( self , __UpperCamelCase , __UpperCamelCase = "" ) -> Optional[int]:
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , __UpperCamelCase )
else:
forceWrite(self.choices[index] , __UpperCamelCase )
    def print_choice( self , index ) -> None:
        '''simple docstring'''
        if index == self.position:
            forceWrite(F""" {self.arrow_char} """ )
            self.write_choice(index )
        else:
            forceWrite(F"""    {self.choices[index]}""" )
        reset_cursor()
    def move_direction( self , direction , num_spaces = 1 ) -> None:
        '''simple docstring'''
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position )
        move_cursor(num_spaces , direction.name )
        self.print_choice(self.position )
@input.mark(KEYMAP['up'] )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
self.move_direction(Direction.UP )
@input.mark(KEYMAP['down'] )
def __a ( self ) -> List[Any]:
'''simple docstring'''
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['newline'] )
def __a ( self ) -> List[str]:
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , 'DOWN' )
return self.position
@input.mark(KEYMAP['interrupt'] )
def __a ( self ) -> str:
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , 'DOWN' )
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
        index = int(chr(self.current_selection ) )
        movement = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , __UpperCamelCase )
else:
return
else:
return
    def run( self , default_choice = 0 ) -> int:
'''simple docstring'''
if self.prompt:
linebreak()
forceWrite(self.prompt , '\n' )
if in_colab:
forceWrite('Please input a choice index (starting from 0), and press enter' , '\n' )
else:
forceWrite('Please select a choice using the arrow or number keys, and selecting with enter' , '\n' )
        self.position = default_choice
for i in range(len(self.choices ) ):
self.print_choice(__UpperCamelCase )
forceWrite('\n' )
move_cursor(len(self.choices ) - self.position , 'UP' )
with cursor.hide():
while True:
                if in_colab:
                    try:
                        choice = int(builtins.input() )
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , 'UP' )
clear_line()
self.write_choice(__UpperCamelCase , '\n' )
return choice
| 143
| 1
|
__author__ = 'Tobias Carryer'
from time import time
class LinearCongruentialGenerator :
    '''simple docstring'''

    def __init__(self ,multiplier ,increment ,modulo ,seed=int(time() ) ) -> None: # noqa: B008
        """simple docstring"""
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self ) -> int:
        """simple docstring"""
        # Advance the generator: seed_{n+1} = (a * seed_n + c) mod m.
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1_6_6_4_5_2_5, 1_0_1_3_9_0_4_2_2_3, 2 << 3_1)
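    # a=1664525, c=1013904223, m=2**32 are the Numerical Recipes LCG constants; any
    # (a, c, m) satisfying the Hull-Dobell conditions yields a full-period generator.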
while True:
print(lcg.next_number())
| 94
|
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    'vocab_size': len(tokenizer),
    'scale_attn_by_inverse_layer_idx': True,
    'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 94
| 1
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCamelCase ( unittest.TestCase ):
    def setUp( self ) -> None:
        self.checkpoint = """laion/clap-htsat-unfused"""
        self.tmpdirname = tempfile.mkdtemp()
    def get_tokenizer( self , **kwargs ) -> RobertaTokenizer:
        return RobertaTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor( self , **kwargs ) -> ClapFeatureExtractor:
        return ClapFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ) -> None:
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ) -> None:
_a : List[str] = self.get_tokenizer()
_a : Any = self.get_feature_extractor()
_a : Optional[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
_a : List[str] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCAmelCase__ )
    def test_save_load_pretrained_additional_features( self ) -> None:
_a : Tuple = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_a : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_a : Union[str, Any] = self.get_feature_extractor(do_normalize=UpperCAmelCase__ , padding_value=1.0 )
_a : Union[str, Any] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCAmelCase__ )
def _lowercase ( self : List[str] ) -> Optional[Any]:
_a : Optional[int] = self.get_feature_extractor()
_a : Tuple = self.get_tokenizer()
_a : List[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ )
_a : Any = floats_list((3, 1000) )
_a : List[Any] = feature_extractor(UpperCAmelCase__ , return_tensors="""np""" )
_a : List[str] = processor(audios=UpperCAmelCase__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowercase ( self : Tuple ) -> Optional[int]:
_a : List[str] = self.get_feature_extractor()
_a : Any = self.get_tokenizer()
_a : Any = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ )
_a : Optional[int] = """This is a test string"""
_a : Tuple = processor(text=UpperCAmelCase__ )
_a : int = tokenizer(UpperCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowercase ( self : List[Any] ) -> Any:
_a : str = self.get_feature_extractor()
_a : List[str] = self.get_tokenizer()
_a : List[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ )
_a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a : Dict = processor.batch_decode(UpperCAmelCase__ )
_a : Any = tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def _lowercase ( self : Dict ) -> List[str]:
_a : str = self.get_feature_extractor()
_a : Optional[Any] = self.get_tokenizer()
_a : Union[str, Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 294
| 0
|
from bisect import bisect
from itertools import accumulate
def SCREAMING_SNAKE_CASE__(vl, wt, w, n) -> float:
    # sort items by value/weight ratio, most valuable per unit weight first
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
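# Illustrative usage sketch (the values below are assumptions for demonstration,
# not from the original file): with values [60, 100, 120], weights [10, 20, 30]
# and capacity 50, the fractional-knapsack optimum is the classic 240.0
# (items 1 and 2 taken whole, two thirds of item 3).
assert SCREAMING_SNAKE_CASE__([60, 100, 120], [10, 20, 30], 50, 3) == 240.0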
| 365
|
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> import torch\n    >>> import numpy as np\n\n    >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n    >>> from transformers import pipeline\n    >>> from diffusers.utils import load_image\n\n\n    >>> def make_hint(image, depth_estimator):\n    ...     image = depth_estimator(image)["depth"]\n    ...     image = np.array(image)\n    ...     image = image[:, :, None]\n    ...     image = np.concatenate([image, image, image], axis=2)\n    ...     detected_map = torch.from_numpy(image).float() / 255.0\n    ...     hint = detected_map.permute(2, 0, 1)\n    ...     return hint\n\n\n    >>> depth_estimator = pipeline("depth-estimation")\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior = pipe_prior.to("cuda")\n\n    >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n    ... )\n    >>> pipe = pipe.to("cuda")\n\n\n    >>> img = load_image(\n    ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n    ...     "/kandinsky/cat.png"\n    ... ).resize((768, 768))\n\n    >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n    >>> prompt = "A robot, 4k photo"\n    >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n    >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n    >>> image_emb, zero_image_emb = pipe_prior(\n    ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n    ... ).to_tuple()\n\n    >>> images = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     hint=hint,\n    ...     num_inference_steps=50,\n    ...     generator=generator,\n    ...     height=768,\n    ...     width=768,\n    ... ).images\n\n    >>> images[0].save("robot_cat.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
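# Illustrative sanity check (values are assumptions for demonstration, not from
# the original file): with the default scale_factor of 8, a dimension is divided
# by 8**2 = 64, rounded up, and mapped back by multiplying with 8, so 768 maps to
# 96 exactly while 770 rounds up to 104.
assert downscale_height_and_width(768, 768) == (96, 96)
assert downscale_height_and_width(770, 770) == (104, 104)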
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """simple docstring"""

    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""")
        device = torch.device(f"""cuda:{gpu_id}""")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(""">=""", """0.17.0.dev0"""):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""")
        device = torch.device(f"""cuda:{gpu_id}""")
        if self.device.type != "cpu":
            self.to("""cpu""", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, """_hf_hook"""):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, """_hf_hook""")
                and hasattr(module._hf_hook, """execution_device""")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        hint,
        height=5_1_2,
        width=5_1_2,
        num_inference_steps=1_0_0,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"""image_embeds""": image_embeds, """hint""": hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                # the UNet stacks the noise prediction and the learned variance along the channel dim
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, """variance_type""")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["""sample"""]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}""")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 176
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime, as the remainder there will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
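# Illustrative usage sketch (not part of the original file): the sieve is an
# incremental generator, so primes can be drawn lazily with no fixed upper bound.
_primes = sieve()
assert [next(_primes) for _ in range(5)] == [2, 3, 5, 7, 11]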
| 265
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.0_2, bos_token_id=0, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="""Blip does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 265
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(self, vocab_size=50_277, context_length=1_024, hidden_size=4_096, num_hidden_layers=32, attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1E-5, bos_token_id=0, eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
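# Illustrative usage sketch (not part of the original file): because of the
# attribute map above, the generic `max_position_embeddings` name resolves to
# `context_length` on instances of this config.
_cfg = RwkvConfig()
assert _cfg.max_position_embeddings == _cfg.context_length == 1_024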
| 125
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
def __init__( self: Tuple , UpperCAmelCase_: Tuple , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = 13
_SCREAMING_SNAKE_CASE = 7
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = 99
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = 37
_SCREAMING_SNAKE_CASE = """gelu"""
_SCREAMING_SNAKE_CASE = 0.1
_SCREAMING_SNAKE_CASE = 0.1
_SCREAMING_SNAKE_CASE = 512
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 0.02
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        '''simple docstring'''
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase ( self: int , UpperCAmelCase_: Dict , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Dict , UpperCAmelCase_: Dict , UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFEsmModel(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Dict , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Any , UpperCAmelCase_: List[str] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = TFEsmModel(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""encoder_hidden_states""": encoder_hidden_states,
"""encoder_attention_mask""": encoder_attention_mask,
}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ )
# Also check the case where encoder outputs are not passed
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[Any] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFEsmForMaskedLM(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: str , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: int , UpperCAmelCase_: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFEsmForTokenClassification(config=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask}
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
__snake_case : List[Any] = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
__snake_case : Tuple = (
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case : List[str] = False
__snake_case : Union[str, Any] = False
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFEsmModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase_ )
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = TFEsmModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@unittest.skip("""Protein models do not support embedding resizing.""" )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip("""Protein models do not support embedding resizing.""" )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase_ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
_SCREAMING_SNAKE_CASE = model.get_bias()
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
for k, v in name.items():
assert isinstance(UpperCAmelCase_ , tf.Variable )
else:
_SCREAMING_SNAKE_CASE = model.get_output_embeddings()
assert x is None
_SCREAMING_SNAKE_CASE = model.get_bias()
assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
@slow
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , UpperCAmelCase_ )
# compare the actual values for a slice.
_SCREAMING_SNAKE_CASE = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )[0]
# compare the actual values for a slice.
_SCREAMING_SNAKE_CASE = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 125
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
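# Note (added for clarity, not in the original script): for the base ViT with
# hidden_size=768, each fused timm qkv weight has shape (3 * 768, 768) = (2304, 768);
# the three consecutive slices above are the query, key and value projections.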
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('tiny'):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith('small'):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('small'):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith('base'):
            pass
        elif vit_name[4:].startswith('large'):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith('huge'):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1E-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1E-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowerCAmelCase = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
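# Illustrative invocation (the script filename below is an assumption, not from
# the original file):
#   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224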
| 89
|
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __magic_name__ :
def __init__( self : List[str] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str]=13 ,_UpperCAmelCase : Any=32 ,_UpperCAmelCase : Union[str, Any]=3 ,_UpperCAmelCase : Optional[int]=4 ,_UpperCAmelCase : Optional[Any]=[10, 20, 30, 40] ,_UpperCAmelCase : Tuple=[2, 2, 3, 2] ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Union[str, Any]=37 ,_UpperCAmelCase : Optional[int]="gelu" ,_UpperCAmelCase : Optional[Any]=10 ,_UpperCAmelCase : Tuple=0.02 ,_UpperCAmelCase : Any=["stage2", "stage3", "stage4"] ,_UpperCAmelCase : Any=[2, 3, 4] ,_UpperCAmelCase : Tuple=None ,):
_a : Optional[Any] = parent
_a : List[Any] = batch_size
_a : str = image_size
_a : Union[str, Any] = num_channels
_a : List[Any] = num_stages
_a : Dict = hidden_sizes
_a : int = depths
_a : Tuple = is_training
_a : List[str] = use_labels
_a : Dict = intermediate_size
_a : int = hidden_act
_a : int = num_labels
_a : Any = initializer_range
_a : Tuple = out_features
_a : int = out_indices
_a : List[Any] = scope
def __lowercase ( self : Dict ):
_a : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Union[str, Any] = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] ,self.num_labels )
_a : str = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Any ):
return ConvNextVaConfig(
num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=_UpperCAmelCase ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
def __lowercase ( self : Tuple ,_UpperCAmelCase : Any ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[Any] ):
_a : Optional[Any] = ConvNextVaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Any = model(_UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowercase ( self : Tuple ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ):
_a : List[Any] = ConvNextVaForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : List[str] = model(_UpperCAmelCase ,labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowercase ( self : str ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[Any] ):
_a : Optional[int] = ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Dict = model(_UpperCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_a : Tuple = None
_a : List[Any] = ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : List[str] = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values, 'labels': labels}
        return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : str = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase : str = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : int = False
lowerCAmelCase : str = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : List[str] = False
lowerCAmelCase : Optional[int] = False
def __lowercase ( self : List[Any] ):
_a : str = ConvNextVaModelTester(self )
_a : Tuple = ConfigTester(self ,config_class=_UpperCAmelCase ,has_text_modality=_UpperCAmelCase ,hidden_size=37 )
def __lowercase ( self : Optional[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : str ):
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def __lowercase ( self : List[Any] ):
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def __lowercase ( self : Optional[int] ):
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def __lowercase ( self : Any ):
pass
def __lowercase ( self : List[str] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_a : Any = True
if model_class.__name__ in [
*get_values(_UpperCAmelCase ),
*get_values(_UpperCAmelCase ),
]:
continue
_a : Optional[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
_a : str = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
_a : Optional[int] = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : str ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_a : Optional[int] = False
_a : Tuple = True
if (
model_class.__name__
in [*get_values(_UpperCAmelCase ), *get_values(_UpperCAmelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
_a : Tuple = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.train()
_a : Any = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
_a : List[Any] = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : List[Any] ):
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = model_class(_UpperCAmelCase )
_a : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Dict = [*signature.parameters.keys()]
_a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_UpperCAmelCase )
def __lowercase ( self : int ):
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def __lowercase ( self : Any ):
def check_hidden_states_output(_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Dict ):
_a : Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a : str = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ) ,expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_a , _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = True
check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : Optional[Any] = True
check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : List[Any] ):
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def __lowercase ( self : int ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = ConvNextVaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __lowerCamelCase ( ) -> List[Any]:
_a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
@cached_property
def __lowercase ( self : Optional[Any] ):
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
def __lowercase ( self : Any ):
_a : List[str] = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(_UpperCAmelCase )
_a : Optional[int] = self.default_image_processor
_a : str = prepare_img()
_a : str = preprocessor(images=_UpperCAmelCase ,return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_a : Dict = model(**_UpperCAmelCase )
# verify the logits
_a : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_UpperCAmelCase )
_a : Optional[Any] = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_UpperCAmelCase ,atol=1E-4 ) )
| 89
| 1
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__snake_case =logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ['input_features']

    def __init__(self, feature_size=8_0, sampling_rate=1_6_0_0_0, hop_length=1_6_0, chunk_length=3_0, n_fft=4_0_0, padding_value=0.0, return_attention_mask=False, **kwargs, ):
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=sampling_rate , norm='slaney' , mel_scale='slaney' , )

    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec , log_spec.max() - 8.0 )
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value: float = 0.0) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1E-7) for x in input_values]
        return normed_input_values
def __call__( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int = True , UpperCAmelCase__ : Tuple = None , UpperCAmelCase__ : List[str] = None , UpperCAmelCase__ : str = None , UpperCAmelCase__ : int = "max_length" , UpperCAmelCase__ : Union[str, Any] = None , UpperCAmelCase__ : Tuple = None , UpperCAmelCase__ : List[Any] = None , **UpperCAmelCase__ : Optional[int] , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowerCAmelCase = isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase = is_batched_numpy or (
isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ):
lowerCAmelCase = np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase = [np.asarray([raw_speech] ).T]
lowerCAmelCase = BatchFeature({'input_features': raw_speech} )
# convert into correct format for padding
lowerCAmelCase = self.pad(
_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , max_length=max_length if max_length else self.n_samples , truncation=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowerCAmelCase = self.zero_mean_unit_var_norm(
padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , )
lowerCAmelCase = np.stack(padded_inputs['input_features'] , axis=0 )
# make sure list is in array format
lowerCAmelCase = padded_inputs.get('input_features' ).transpose(2 , 0 , 1 )
lowerCAmelCase = [self._np_extract_fbank_features(_SCREAMING_SNAKE_CASE ) for waveform in input_features[0]]
if isinstance(input_features[0] , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase = [np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
else:
lowerCAmelCase = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowerCAmelCase = padded_inputs["""attention_mask"""][:, :: self.hop_length]
if return_tensors is not None:
lowerCAmelCase = padded_inputs.convert_to_tensors(_SCREAMING_SNAKE_CASE )
return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output['feature_extractor_type'] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
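# Illustrative usage sketch (comment only; the class name and call follow the
# public transformers API, but treat the exact shape as an assumption):
# >>> import numpy as np
# >>> fe = WhisperFeatureExtractor()
# >>> fe(np.zeros(16_000), sampling_rate=16_000, return_tensors="np")["input_features"].shape
# (1, 80, 3000)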
| 359
|
"""Implementation of the Playfair cipher."""
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    # Upper-case the message, drop non-letters, and pad repeated letters
    # (and odd-length input) with X's.
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
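# A minimal round-trip sketch for the cipher above; the key and message are
# only illustrative.
if __name__ == "__main__":
    key = "playfair example"
    secret = encode("Hide the gold in the tree stump", key)
    print(secret)
    # decode() returns the *prepared* plaintext: upper-cased, with the
    # padding X's inserted between doubled letters.
    print(decode(secret, key))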
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
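# A minimal loading sketch for the exported pipeline; the checkpoint IDs below
# are commonly used public models and are assumptions, not part of this module.
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
)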
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
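# The replacement the warning points at, as a short sketch; the checkpoint ID
# is an illustrative assumption.
from diffusers import StableDiffusionInpaintPipeline

pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")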
"""simple docstring"""
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""nielsr/canine-s""": 2_048,
}
# Unicode defines 1,114,112 total “codepoints”
lowerCamelCase__ = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
lowerCamelCase__ = 0
lowerCamelCase__ = 0XE0_00
lowerCamelCase__ = 0XE0_01
lowerCamelCase__ = 0XE0_02
lowerCamelCase__ = 0XE0_03
lowerCamelCase__ = 0XE0_04
# Maps special codepoints to human-readable names.
lowerCamelCase__ = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
lowerCamelCase__ = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class A__ ( _lowerCamelCase):
A_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _SCREAMING_SNAKE_CASE=chr(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE=chr(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE=chr(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE=chr(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE=chr(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE=chr(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=20_48 , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Union[str, Any] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else bos_token
__lowerCAmelCase : Optional[Any] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else eos_token
__lowerCAmelCase : Any = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else sep_token
__lowerCAmelCase : Union[str, Any] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else cls_token
__lowerCAmelCase : Any = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__lowerCAmelCase : Any = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , model_max_length=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Creates a mapping for looking up the IDs of special symbols.
__lowerCAmelCase : Dict[str, int] = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
__lowerCAmelCase : Optional[int] = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
__lowerCAmelCase : Dict[int, str] = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
__lowerCAmelCase : List[Any] = UNICODE_VOCAB_SIZE
__lowerCAmelCase : List[str] = len(self._special_codepoints )
@property
def __lowerCamelCase ( self ):
return self._unicode_vocab_size
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
return list(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
try:
return ord(_SCREAMING_SNAKE_CASE )
except TypeError:
raise ValueError(f"invalid token: '{token}'" )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(_SCREAMING_SNAKE_CASE )
except TypeError:
raise ValueError(f"invalid id: {index}" )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
return "".join(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
__lowerCAmelCase : int = [self.sep_token_id]
__lowerCAmelCase : Dict = [self.cls_token_id]
__lowerCAmelCase : Optional[int] = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
if token_ids_a is not None:
result += ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return result
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
__lowerCAmelCase : Union[str, Any] = [self.sep_token_id]
__lowerCAmelCase : str = [self.cls_token_id]
__lowerCAmelCase : int = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
return ()
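# A quick usage sketch for the character-level tokenizer above: every Unicode
# codepoint is its own token, so IDs are just ord() values plus the special
# codepoints (CLS = 0xE000 = 57344, SEP = 0xE001 = 57345).
tokenizer = CanineTokenizer()
encoding = tokenizer("hello")
print(encoding["input_ids"])  # [57344, 104, 101, 108, 108, 111, 57345]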
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_lowerCamelCase)
class A__ ( _lowerCamelCase):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
A_ : str = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True})
A_ : ClassVar[Features] = Features({'text': Value('string')})
A_ : ClassVar[Features] = Features({'labels': ClassLabel})
A_ : str = "text"
A_ : str = "labels"
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , _SCREAMING_SNAKE_CASE ):
raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
__lowerCAmelCase : Any = copy.deepcopy(self )
__lowerCAmelCase : Dict = self.label_schema.copy()
__lowerCAmelCase : List[Any] = features[self.label_column]
__lowerCAmelCase : Dict = label_schema
return task_template
@property
def __lowerCamelCase ( self ):
return {
self.text_column: "text",
self.label_column: "labels",
}
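# A sketch of how align_with_features() specializes the template to a concrete
# dataset schema; the label names are illustrative.
features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
task = TextClassification(text_column="text", label_column="labels")

aligned = task.align_with_features(features)
print(aligned.label_schema["labels"].names)  # ['neg', 'pos']
print(aligned.column_mapping)  # {'text': 'text', 'labels': 'labels'}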
import os
import sys


SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
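# Because this file follows the torch.hub entry-point convention (a
# `dependencies` list plus callable entry points), it can be consumed through
# torch.hub; a minimal sketch, assuming the repository is published as
# huggingface/transformers.
import torch

hub_tokenizer = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
hub_model = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")
print(hub_model.config.hidden_size)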
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
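# A minimal construction sketch; the data directory is a placeholder pointing
# at a GLUE task laid out as .tsv files (here MRPC).
from transformers import AutoTokenizer

glue_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="/path/to/glue/MRPC", max_seq_length=128)

train_dataset = GlueDataset(data_args, tokenizer=glue_tokenizer, mode="train")
print(len(train_dataset))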
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_CITATION = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
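# A direct (non-CLI) invocation sketch with placeholder paths; note the
# script derives the vocab path from the checkpoint prefix, so
# `tf_checkpoint_path` should end in "model.ckpt".
convert_tf_checkpoint_to_pytorch(
    task="WTQ",
    reset_position_index_per_cell=True,
    tf_checkpoint_path="/path/to/model.ckpt",
    tapas_config_file="/path/to/tapas_config.json",
    pytorch_dump_path="/path/to/tapas-wtq-pytorch",
)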
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
def lowercase ( self : Any ) -> List[Any]:
lowercase : Dict = 0
@slow
def lowercase ( self : List[Any] ) -> Optional[int]:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowercase : Tuple = AutoTokenizer.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowerCAmelCase ), 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowercase : Dict = AutoTokenizer.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowerCAmelCase ), 0 )
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
lowercase : List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size, 12 )
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
lowercase : int = AutoTokenizer.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size, 20 )
def lowercase ( self : Optional[Any] ) -> Dict:
lowercase : Dict = AutoConfig.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
# Check that tokenizer_type ≠ model_type
lowercase : Optional[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase, config=lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size, 12 )
def lowercase ( self : str ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt', os.path.join(lowerCAmelCase, 'vocab.txt' ) )
lowercase : List[str] = AutoTokenizer.from_pretrained(lowerCAmelCase, tokenizer_type='bert', use_fast=lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json', os.path.join(lowerCAmelCase, 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt', os.path.join(lowerCAmelCase, 'merges.txt' ) )
lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCAmelCase, tokenizer_type='gpt2', use_fast=lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
@require_tokenizers
def lowercase ( self : List[str] ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt', os.path.join(lowerCAmelCase, 'vocab.txt' ) )
lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCAmelCase, tokenizer_type='bert' )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json', os.path.join(lowerCAmelCase, 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt', os.path.join(lowerCAmelCase, 'merges.txt' ) )
lowercase : str = AutoTokenizer.from_pretrained(lowerCAmelCase, tokenizer_type='gpt2' )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
def lowercase ( self : Any ) -> Dict:
with pytest.raises(lowerCAmelCase ):
AutoTokenizer.from_pretrained('./', tokenizer_type='xxx' )
@require_tokenizers
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
lowercase : str = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased' )
self.assertIsInstance(lowerCAmelCase, (BertTokenizer, BertTokenizerFast) )
if isinstance(lowerCAmelCase, lowerCAmelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, lowerCAmelCase )
else:
self.assertEqual(tokenizer.do_lower_case, lowerCAmelCase )
self.assertEqual(tokenizer.model_max_length, 512 )
@require_tokenizers
def lowercase ( self : Optional[Any] ) -> List[str]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowerCAmelCase, 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier', ):
lowercase : List[str] = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists' )
def lowercase ( self : int ) -> Dict:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
lowercase : Optional[Any] = TOKENIZER_MAPPING.values()
lowercase : str = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowerCAmelCase )
@require_tokenizers
def lowercase ( self : Tuple ) -> int:
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased', use_fast=lowerCAmelCase ), lowerCAmelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' ), lowerCAmelCase )
@require_tokenizers
def lowercase ( self : Union[str, Any] ) -> Optional[int]:
lowercase : List[str] = AutoTokenizer.from_pretrained('distilbert-base-uncased', do_lower_case=lowerCAmelCase )
lowercase : Optional[Any] = 'Hello, world. How are you?'
lowercase : Dict = tokenizer.tokenize(lowerCAmelCase )
self.assertEqual('[UNK]', tokens[0] )
lowercase : str = AutoTokenizer.from_pretrained('microsoft/mpnet-base', do_lower_case=lowerCAmelCase )
lowercase : Any = tokenizer.tokenize(lowerCAmelCase )
self.assertEqual('[UNK]', tokens[0] )
@require_tokenizers
def lowercase ( self : Union[str, Any] ) -> Tuple:
lowercase : Tuple = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config' )
self.assertEqual(type(lowerCAmelCase ), lowerCAmelCase )
self.assertEqual(tokenizer.model_max_length, 512 )
self.assertEqual(tokenizer.vocab_size, 30000 )
self.assertEqual(tokenizer.unk_token, '[UNK]' )
self.assertEqual(tokenizer.padding_side, 'right' )
self.assertEqual(tokenizer.truncation_side, 'right' )
def lowercase ( self : Union[str, Any] ) -> List[str]:
lowercase : List[str] = AutoTokenizer.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase )
lowercase : List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size, 12 )
def lowercase ( self : Optional[Any] ) -> Tuple:
lowercase : int = AutoTokenizer.from_pretrained('ctrl' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
def lowercase ( self : int ) -> List[Any]:
# Check we can load the tokenizer config of an online model.
lowercase : Optional[Any] = get_tokenizer_config('bert-base-cased' )
lowercase : Optional[int] = config.pop('_commit_hash', lowerCAmelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowerCAmelCase, {'do_lower_case': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
lowercase : Union[str, Any] = get_tokenizer_config(lowerCAmelCase )
self.assertDictEqual(lowerCAmelCase, {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
lowercase : str = AutoTokenizer.from_pretrained(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase )
lowercase : List[str] = get_tokenizer_config(lowerCAmelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'], 'BertTokenizer' )
def lowercase ( self : List[str] ) -> Optional[int]:
try:
AutoConfig.register('custom', lowerCAmelCase )
AutoTokenizer.register(lowerCAmelCase, slow_tokenizer_class=lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase ):
AutoTokenizer.register(lowerCAmelCase, slow_tokenizer_class=lowerCAmelCase )
lowercase : str = CustomTokenizer.from_pretrained(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase )
lowercase : Tuple = AutoTokenizer.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase ( self : List[Any] ) -> str:
try:
AutoConfig.register('custom', lowerCAmelCase )
# Can register in two steps
AutoTokenizer.register(lowerCAmelCase, slow_tokenizer_class=lowerCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None) )
AutoTokenizer.register(lowerCAmelCase, fast_tokenizer_class=lowerCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowerCAmelCase, slow_tokenizer_class=lowerCAmelCase, fast_tokenizer_class=lowerCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase ):
AutoTokenizer.register(lowerCAmelCase, fast_tokenizer_class=lowerCAmelCase )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : str = BertTokenizerFast.from_pretrained(lowerCAmelCase )
bert_tokenizer.save_pretrained(lowerCAmelCase )
lowercase : str = CustomTokenizerFast.from_pretrained(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase )
lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
lowercase : Dict = AutoTokenizer.from_pretrained(lowerCAmelCase, use_fast=lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase, lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase ( self : List[str] ) -> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCAmelCase ):
lowercase : Optional[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase ):
lowercase : Any = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer', trust_remote_code=lowerCAmelCase )
lowercase : Optional[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer', trust_remote_code=lowerCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase )
lowercase : Optional[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase, trust_remote_code=lowerCAmelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizerFast' )
self.assertEqual(reloaded_tokenizer.__class__.__name__, 'NewTokenizerFast' )
# Test we can also load the slow version
lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer', trust_remote_code=lowerCAmelCase, use_fast=lowerCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase )
lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCAmelCase, trust_remote_code=lowerCAmelCase, use_fast=lowerCAmelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__, 'NewTokenizer' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer' )
self.assertEqual(reloaded_tokenizer.__class__.__name__, 'NewTokenizer' )
@require_tokenizers
def lowercase ( self : List[str] ) -> int:
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
try:
AutoConfig.register('custom', lowerCAmelCase )
AutoTokenizer.register(lowerCAmelCase, slow_tokenizer_class=lowerCAmelCase )
AutoTokenizer.register(lowerCAmelCase, fast_tokenizer_class=lowerCAmelCase )
# If remote code is not set, the default is to use local
lowercase : str = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
lowercase : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer', use_fast=lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
lowercase : List[Any] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer', trust_remote_code=lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
lowercase : Dict = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer', trust_remote_code=lowerCAmelCase, use_fast=lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer', trust_remote_code=lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizerFast' )
self.assertTrue(tokenizer.special_attribute_present )
lowercase : Optional[int] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer', trust_remote_code=lowerCAmelCase, use_fast=lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase ( self : Optional[int] ) -> Union[str, Any]:
lowercase : int = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy', trust_remote_code=lowerCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizerFast' )
# Test we can also load the slow version
lowercase : int = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy', trust_remote_code=lowerCAmelCase, use_fast=lowerCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer' )
def lowercase ( self : Optional[int] ) -> Any:
with self.assertRaisesRegex(
lowerCAmelCase, 'bert-base is not a local folder and is not a valid model identifier' ):
lowercase : Tuple = AutoTokenizer.from_pretrained('bert-base' )
def lowercase ( self : Tuple ) -> List[Any]:
with self.assertRaisesRegex(
lowerCAmelCase, R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowercase : str = AutoTokenizer.from_pretrained(lowerCAmelCase, revision='aaaaaa' )
def lowercase ( self : str ) -> str:
# Make sure we have cached the tokenizer.
lowercase : Tuple = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
lowercase : str = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count, 0 )
self.assertEqual(counter.head_request_count, 1 )
self.assertEqual(counter.other_request_count, 0 )
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }

        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }

        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
def SCREAMING_SNAKE_CASE__ ( self ):
if not hasattr(self.pipeline_class , '''_optional_components''' ):
return
a :Any = self.get_dummy_components()
a :str = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
a :Union[str, Any] = self.get_dummy_inputs(_lowerCamelCase )
a :List[Any] = pipe(**_lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowerCamelCase )
a :int = self.pipeline_class.from_pretrained(_lowerCamelCase )
pipe_loaded.to(_lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=_lowerCamelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_lowerCamelCase , _lowerCamelCase ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
a :Dict = self.get_dummy_inputs(_lowerCamelCase )
a :Tuple = pipe_loaded(**_lowerCamelCase )[0]
a :List[str] = np.abs(output - output_loaded ).max()
self.assertLess(_lowerCamelCase , 1e-4 )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Any = '''cpu'''
a :Optional[int] = self.get_dummy_components()
a :List[str] = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
a :Any = self.get_dummy_mask_inputs(_lowerCamelCase )
a :str = pipe.generate_mask(**_lowerCamelCase )
a :List[str] = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
a :List[Any] = np.array([0] * 9 )
a :str = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCamelCase , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Any = '''cpu'''
a :List[str] = self.get_dummy_components()
a :List[Any] = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
a :str = self.get_dummy_inversion_inputs(_lowerCamelCase )
a :Any = pipe.invert(**_lowerCamelCase ).images
a :Union[str, Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
        a :str = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
a :str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCamelCase , 1e-3 )
def SCREAMING_SNAKE_CASE__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def SCREAMING_SNAKE_CASE__ ( self ):
a :str = '''cpu'''
a :str = self.get_dummy_components()
        a :Union[str, Any] = {'''beta_start''': 0.00085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
a :Dict = DPMSolverMultistepScheduler(**_lowerCamelCase )
a :List[str] = DPMSolverMultistepInverseScheduler(**_lowerCamelCase )
a :int = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
a :str = self.get_dummy_inversion_inputs(_lowerCamelCase )
a :Tuple = pipe.invert(**_lowerCamelCase ).images
a :Any = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
        a :int = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
a :Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCamelCase , 1e-3 )
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls ):
a :Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
a :Union[str, Any] = raw_image.convert('''RGB''' ).resize((768, 768) )
a :List[Any] = raw_image
def SCREAMING_SNAKE_CASE__ ( self ):
a :Tuple = torch.manual_seed(0 )
a :int = StableDiffusionDiffEditPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2-1''' , safety_checker=_lowerCamelCase , torch_dtype=torch.float16 )
a :Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config )
a :Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_lowerCamelCase )
a :Optional[Any] = '''a bowl of fruit'''
a :Any = '''a bowl of pears'''
a :Any = pipe.generate_mask(
image=self.raw_image , source_prompt=_lowerCamelCase , target_prompt=_lowerCamelCase , generator=_lowerCamelCase , )
a :Dict = pipe.invert(
prompt=_lowerCamelCase , image=self.raw_image , inpaint_strength=0.7 , generator=_lowerCamelCase ).latents
a :List[str] = pipe(
prompt=_lowerCamelCase , mask_image=_lowerCamelCase , image_latents=_lowerCamelCase , generator=_lowerCamelCase , negative_prompt=_lowerCamelCase , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
a :List[str] = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[int] = torch.manual_seed(0 )
a :List[Any] = StableDiffusionDiffEditPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2-1''' , safety_checker=_lowerCamelCase , torch_dtype=torch.float16 )
a :Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
a :Union[str, Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_lowerCamelCase )
a :Dict = '''a bowl of fruit'''
a :Optional[Any] = '''a bowl of pears'''
a :Tuple = pipe.generate_mask(
image=self.raw_image , source_prompt=_lowerCamelCase , target_prompt=_lowerCamelCase , generator=_lowerCamelCase , )
a :Dict = pipe.invert(
prompt=_lowerCamelCase , image=self.raw_image , inpaint_strength=0.7 , generator=_lowerCamelCase , num_inference_steps=25 , ).latents
a :str = pipe(
prompt=_lowerCamelCase , mask_image=_lowerCamelCase , image_latents=_lowerCamelCase , generator=_lowerCamelCase , negative_prompt=_lowerCamelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0]
a :List[Any] = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
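# Hedged end-to-end sketch (not part of the test suite above): the three DiffEdit
# stages the slow tests exercise, chained on one image. The model id, prompts and
# inpaint_strength mirror the tests; everything else is an illustrative assumption.
def _diffedit_usage_sketch(raw_image):
    import torch
    from diffusers import StableDiffusionDiffEditPipeline
    pipe = StableDiffusionDiffEditPipeline.from_pretrained(
        'stabilityai/stable-diffusion-2-1' , safety_checker=None , torch_dtype=torch.float16 )
    pipe.enable_model_cpu_offload()
    generator = torch.manual_seed(0 )
    # 1) caption-difference mask, 2) inversion to latents, 3) masked denoising
    mask = pipe.generate_mask(
        image=raw_image , source_prompt='a bowl of fruit' , target_prompt='a bowl of pears' , generator=generator )
    latents = pipe.invert(
        prompt='a bowl of fruit' , image=raw_image , inpaint_strength=0.7 , generator=generator ).latents
    return pipe(
        prompt='a bowl of pears' , mask_image=mask , image_latents=latents ,
        negative_prompt='a bowl of fruit' , inpaint_strength=0.7 , generator=generator ).images[0]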
| 94
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
snake_case : List[str] = logging.get_logger(__name__)
snake_case : int = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _snake_case ( PretrainedConfig ):
    model_type = 'blenderbot-small'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=5_0265 , max_position_embeddings=512 , encoder_layers=8 , encoder_ffn_dim=2048 , encoder_attention_heads=16 , decoder_layers=8 , decoder_ffn_dim=2048 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=1 , scale_embedding=False , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , forced_eos_token_id=2 , **kwargs , ):
a :Dict = vocab_size
a :Optional[Any] = max_position_embeddings
a :str = d_model
a :Any = encoder_ffn_dim
a :Optional[int] = encoder_layers
a :List[str] = encoder_attention_heads
a :List[str] = decoder_ffn_dim
a :Optional[int] = decoder_layers
a :str = decoder_attention_heads
a :List[str] = dropout
a :Optional[int] = attention_dropout
a :Dict = activation_dropout
a :List[str] = activation_function
a :List[Any] = init_std
a :Optional[int] = encoder_layerdrop
a :Tuple = decoder_layerdrop
a :List[str] = use_cache
a :int = encoder_layers
a :Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class _snake_case ( OnnxSeq2SeqConfigWithPast ):
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
a :Optional[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
a :Union[str, Any] = {0: '''batch'''}
a :Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
a :Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''}
a :str = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_lowerCamelCase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
a :Optional[int] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
a , a :str = self.num_layers
for i in range(_lowerCamelCase ):
a :List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
a :List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
a :Optional[int] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
a :List[Any] = super().outputs
else:
            a :Union[str, Any] = super(OnnxConfigWithPast , self ).outputs
if self.use_past:
a , a :int = self.num_layers
for i in range(_lowerCamelCase ):
a :int = {0: '''batch''', 2: '''past_sequence + sequence'''}
a :Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ):
a :Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Generate decoder inputs
a :Dict = seq_length if not self.use_past else 1
a :Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
a :List[Any] = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
a :List[str] = dict(**_lowerCamelCase , **_lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
a , a :Optional[Any] = common_inputs['''input_ids'''].shape
a :Tuple = common_inputs['''decoder_input_ids'''].shape[1]
a , a :List[Any] = self.num_attention_heads
a :List[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
a :int = decoder_seq_length + 3
a :Union[str, Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
a :Union[str, Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(_lowerCamelCase , _lowerCamelCase )] , dim=1 )
a :List[Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
a , a :Optional[int] = self.num_layers
a :str = min(_lowerCamelCase , _lowerCamelCase )
a :str = max(_lowerCamelCase , _lowerCamelCase ) - min_num_layers
a :Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(_lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
) )
# TODO: test this.
a :int = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(_lowerCamelCase , _lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) )
return common_inputs
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ):
a :Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
a , a :Dict = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
a :Optional[int] = seqlen + 2
a , a :Union[str, Any] = self.num_layers
a , a :Optional[Any] = self.num_attention_heads
a :str = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
a :Tuple = common_inputs['''attention_mask'''].dtype
a :Any = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(_lowerCamelCase , _lowerCamelCase , dtype=_lowerCamelCase )] , dim=1 )
a :Any = [
(torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) for _ in range(_lowerCamelCase )
]
return common_inputs
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
a :Optional[Any] = compute_effective_axis_dimension(
_lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
a :Optional[int] = tokenizer.num_special_tokens_to_add(_lowerCamelCase )
a :Tuple = compute_effective_axis_dimension(
_lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
a :List[str] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
a :Dict = dict(tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase ) )
return common_inputs
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
a :Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
elif self.task == "causal-lm":
a :Dict = self._generate_dummy_inputs_for_causal_lm(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
else:
a :Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
return common_inputs
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if self.task in ["default", "seq2seq-lm"]:
a :Optional[int] = super()._flatten_past_key_values_(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
a :Any = super(_lowerCamelCase , self )._flatten_past_key_values_(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
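# Hedged usage sketch (an assumption, not part of the original module): with the
# obfuscated method names mapped back to the standard transformers OnnxConfig API,
# the seq2seq config above is driven through generate_dummy_inputs, which
# dispatches on `task` exactly as the last method does. Passing -1 for the batch
# and sequence axes triggers compute_effective_axis_dimension's fixed fallbacks
# (2 samples / 8 tokens) so ONNX cannot specialize the dynamic axes away.
def _onnx_dummy_inputs_sketch(onnx_config, tokenizer):
    return onnx_config.generate_dummy_inputs(
        tokenizer , batch_size=-1 , seq_length=-1 , is_pair=False , framework=TensorType.PYTORCH )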
| 94
| 1
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def list_datasets_on_hf_gcp_parameters( with_config: Dict=True )-> List[Any]:
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
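# Worked example of the helper above: with with_config=True it yields one named
# test case per (dataset, config) pair, e.g.
#   {"testcase_name": "wikipedia/20220301.de", "dataset": "wikipedia", "config_name": "20220301.de"}
# while with_config=False deduplicates to one case per dataset, e.g.
#   {"testcase_name": "wikipedia", "dataset": "wikipedia"}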
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class _lowerCAmelCase ( TestCase ):
'''simple docstring'''
    dataset : Any =None
    config_name : Any =None
    def UpperCamelCase_ ( self : List[Any] , dataset : str , config_name : Optional[Any] ):
        '''simple docstring'''
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance : DatasetBuilder = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = '/'.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , '/' ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs( tmp_path_factory ):
    tmp_dir = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance : DatasetBuilder = builder_cls(
        cache_dir=tmp_dir , config_name='20220301.frr' , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs( tmp_path ):
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance : DatasetBuilder = builder_cls(
        cache_dir=tmp_path , config_name='20220301.frr' , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds['train'] , IterableDataset )
    assert next(iter(ds['train'] ) )
| 260
|
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """nielsr/canine-s""": 2048,
}
# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE_000
SEP = 0xE_001
BOS = 0xE_002
MASK = 0xE_003
RESERVED = 0xE_004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
lowerCAmelCase_ = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class _lowerCAmelCase ( PreTrainedTokenizer ):
    '''simple docstring'''
    max_model_input_sizes : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self : Dict , bos_token : int=chr(CLS ) , eos_token : Union[str, Any]=chr(SEP ) , sep_token : Any=chr(SEP ) , cls_token : Union[str, Any]=chr(CLS ) , pad_token : List[Any]=chr(PAD ) , mask_token : List[str]=chr(MASK ) , add_prefix_space : int=False , model_max_length : str=20_48 , **kwargs : List[str] , ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints : Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings : Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
@property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
return self._unicode_vocab_size
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : str ):
'''simple docstring'''
return list(UpperCamelCase )
    def UpperCamelCase_ ( self : List[Any] , token : str ):
        '''simple docstring'''
        try:
            return ord(token )
        except TypeError:
            raise ValueError(f"""invalid token: '{token}'""" )
    def UpperCamelCase_ ( self : Dict , index : int ):
        '''simple docstring'''
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(f"""invalid id: {index}""" )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : List[Any] ):
'''simple docstring'''
return "".join(UpperCamelCase )
    def UpperCamelCase_ ( self : Union[str, Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def UpperCamelCase_ ( self : Tuple , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        result = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1 )) + [1]
        return result
    def UpperCamelCase_ ( self : Union[str, Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep ) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep ) * [1]
        return result
def UpperCamelCase_ ( self : int , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
return ()
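# Hedged worked example (not part of the original file): CANINE tokenizes at the
# character level, so token <-> id conversion is plain ord()/chr(), with the
# private-use codepoints 0xE000-0xE004 reserved for the pseudo-characters above.
def _canine_roundtrip_sketch():
    text = 'hi'
    ids = [ord(ch) for ch in text]                  # [104, 105]
    assert ''.join(chr(i) for i in ids) == text     # lossless round trip
    assert SPECIAL_CODEPOINTS[0xE_000] == '[CLS]'   # special ids decode to names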
| 260
| 1
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester :
'''simple docstring'''
    def __init__( self : List[Any] , parent : List[Any] , batch_size : Tuple=13 , seq_length : List[str]=7 , is_training : str=True , use_input_lengths : Union[str, Any]=True , use_token_type_ids : List[Any]=True , use_labels : Dict=True , gelu_activation : Union[str, Any]=True , sinusoidal_embeddings : int=False , causal : str=False , asm : Optional[Any]=False , n_langs : List[str]=2 , vocab_size : int=99 , n_special : str=0 , hidden_size : Dict=32 , num_hidden_layers : str=5 , num_attention_heads : Any=4 , hidden_dropout_prob : Optional[Any]=0.1 , attention_probs_dropout_prob : List[str]=0.1 , max_position_embeddings : Union[str, Any]=512 , type_sequence_label_size : Any=2 , initializer_range : List[Any]=0.02 , num_labels : Any=2 , num_choices : str=4 , summary_type : Any="last" , use_proj : str=True , scope : Union[str, Any]=None , bos_token_id : Tuple=0 , ) ->List[str]:
"""simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
def __lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_input_lengths:
a = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
a = None
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a = ids_tensor([self.batch_size] , 2 ).float()
a = ids_tensor([self.batch_size] , self.num_choices )
a = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __lowerCAmelCase ( self : List[Any] ) ->Optional[int]:
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , ) ->List[str]:
"""simple docstring"""
a = XLMModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a = model(__UpperCAmelCase , lengths=__UpperCAmelCase , langs=__UpperCAmelCase )
a = model(__UpperCAmelCase , langs=__UpperCAmelCase )
a = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : str , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] , ) ->Any:
"""simple docstring"""
a = XLMWithLMHeadModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , ) ->Optional[Any]:
"""simple docstring"""
a = XLMForQuestionAnsweringSimple(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a = model(__UpperCAmelCase )
a = model(__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase )
a = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str] , ) ->List[Any]:
"""simple docstring"""
a = XLMForQuestionAnswering(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a = model(__UpperCAmelCase )
a = model(
__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , cls_index=__UpperCAmelCase , is_impossible=__UpperCAmelCase , p_mask=__UpperCAmelCase , )
a = model(
__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , cls_index=__UpperCAmelCase , is_impossible=__UpperCAmelCase , )
((a) , ) = result_with_labels.to_tuple()
a = model(__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase )
((a) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __lowerCAmelCase ( self : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , ) ->Dict:
"""simple docstring"""
a = XLMForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a = model(__UpperCAmelCase )
a = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : str , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : str , ) ->Optional[Any]:
"""simple docstring"""
a = self.num_labels
a = XLMForTokenClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] , ) ->str:
"""simple docstring"""
a = self.num_choices
a = XLMForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
        return config, inputs_dict
@require_torch
class lowercase_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any] ) ->int:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : List[str]=False ) ->int:
"""simple docstring"""
a = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
return inputs_dict
def __lowerCAmelCase ( self : int ) ->Any:
"""simple docstring"""
        self.model_tester = XLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XLMConfig , emb_dim=37 )
def __lowerCAmelCase ( self : Dict ) ->Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : int ) ->Optional[Any]:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__UpperCAmelCase )
def __lowerCAmelCase ( self : str ) ->int:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__UpperCAmelCase )
def __lowerCAmelCase ( self : Tuple ) ->Dict:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__UpperCAmelCase )
def __lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__UpperCAmelCase )
def __lowerCAmelCase ( self : Tuple ) ->Tuple:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__UpperCAmelCase )
def __lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__UpperCAmelCase )
def __lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__UpperCAmelCase )
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict , __UpperCAmelCase : int=False , __UpperCAmelCase : int=1 ) ->Dict:
"""simple docstring"""
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(
[isinstance(__UpperCAmelCase , __UpperCAmelCase ) for iter_attentions in attentions] , [True] * len(__UpperCAmelCase ) )
self.assertEqual(len(__UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__UpperCAmelCase ):
# adds PAD dummy token
a = min_length + idx + 1
a = min_length + idx + 1
a = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__UpperCAmelCase ) )
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : int=1 ) ->Dict:
"""simple docstring"""
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(
[isinstance(__UpperCAmelCase , __UpperCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(__UpperCAmelCase ) , )
self.assertEqual(len(__UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__UpperCAmelCase ):
# adds PAD dummy token
a = min_length + idx + 1
a = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__UpperCAmelCase ) , )
pass
@slow
def __lowerCAmelCase ( self : str ) ->Tuple:
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = XLMModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_torch
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
a = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(__UpperCAmelCase )
a = torch.tensor([[14, 447]] , dtype=torch.long , device=__UpperCAmelCase ) # the president
a = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
a = model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __UpperCAmelCase )
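# Hedged sketch of the shape conventions the tester above checks. XLM accepts
# optional per-example `lengths` and per-token `langs` ids alongside input_ids;
# all values here are illustrative assumptions, not fixtures from the tests.
def _xlm_inputs_sketch():
    import torch
    batch_size, seq_length = 2, 7
    input_ids = torch.randint(0 , 99 , (batch_size, seq_length) )
    lengths = torch.tensor([seq_length, seq_length - 2] )             # true length per example
    langs = torch.zeros(batch_size , seq_length , dtype=torch.long )  # language id per token
    return {'input_ids': input_ids, 'lengths': lengths, 'langs': langs}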
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class A ( PretrainedConfig ):
__snake_case = 'vit'
    def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class A ( OnnxConfig ):
__snake_case = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return 1E-4
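# Hedged worked example (an illustration, not part of the original module): with
# the defaults above (image_size=224, patch_size=16) a ViT input is split into
# (224 // 16) ** 2 = 196 patches, plus one [CLS] token, for a sequence length of
# 197 at hidden_size=768.
def _vit_sequence_length_sketch(image_size=224, patch_size=16):
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1  # +1 for the [CLS] token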
| 278
| 0
|
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCAmelCase ( PipelineTool ):
    '''simple docstring'''
    default_checkpoint = 'facebook/bart-large-mnli'
    description = (
        'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
        'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
        'It returns the most likely label in the list of provided `labels` for the input text.'
    )
    name = 'text_classifier'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ['text', ['text']]
    outputs = ['text']
    def setup( self : List[str] ):
        """simple docstring"""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('''entail''' ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
    def encode( self : List[Any] , text : Any , labels : Optional[int] ):
        """simple docstring"""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [F"""This example is {label}""" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )
    def decode( self : Union[str, Any] , outputs : int ):
        """simple docstring"""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
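# Hedged usage sketch (illustrative labels, and assuming PipelineTool.__call__
# forwards its arguments to encode as in transformers tools): the tool frames
# zero-shot classification as NLI, pairing the text with one "This example is
# {label}" hypothesis per label and returning the label whose entailment logit
# is largest, as encode/decode above show.
def _text_classifier_sketch():
    tool = UpperCAmelCase()
    return tool('This movie was great' , labels=['positive', 'negative'] )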
| 356
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
def generate_summaries_or_translations( examples , out_file , model_name , batch_size = 8 , device = DEFAULT_DEVICE , fp16=False , task="summarization" , prefix=None , **generate_kwargs , ) -> Dict:
    fout = Path(out_file ).open('''w''' , encoding='''utf-8''' )
    model_name = str(model_name )
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name ).to(device )
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" )  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model , task )
    if prefix is None:
        prefix = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
    for examples_chunk in tqdm(list(chunks(examples , batch_size ) ) ):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk , return_tensors='''pt''' , truncation=True , padding='''longest''' ).to(device )
        summaries = model.generate(
            input_ids=batch.input_ids , attention_mask=batch.attention_mask , **generate_kwargs , )
        dec = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        for hypothesis in dec:
            fout.write(hypothesis + '''\n''' )
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time )  # seconds
    n_obs = len(examples )
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def datetime_now() -> Tuple:
    return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def run_generate( verbose=True ) -> Optional[Any]:
    parser = argparse.ArgumentParser()
    parser.add_argument('''model_name''' , type=str , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
    parser.add_argument('''input_path''' , type=str , help='''like cnn_dm/test.source''' )
    parser.add_argument('''save_path''' , type=str , help='''where to save summaries''' )
    parser.add_argument('''--reference_path''' , type=str , required=False , help='''like cnn_dm/test.target''' )
    parser.add_argument('''--score_path''' , type=str , required=False , default='''metrics.json''' , help='''where to save metrics''' )
    parser.add_argument('''--device''' , type=str , required=False , default=DEFAULT_DEVICE , help='''cuda, cuda:1, cpu etc.''' )
    parser.add_argument(
        '''--prefix''' , type=str , required=False , default=None , help='''will be added to the beginning of src examples''' )
    parser.add_argument('''--task''' , type=str , default='''summarization''' , help='''used for task_specific_params + metrics''' )
    parser.add_argument('''--bs''' , type=int , default=8 , required=False , help='''batch size''' )
    parser.add_argument(
        '''--n_obs''' , type=int , default=-1 , required=False , help='''How many observations. Defaults to all.''' )
    parser.add_argument('''--fp16''' , action='''store_true''' )
    parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
    parser.add_argument(
        '''--info''' , nargs='''?''' , type=str , const=datetime_now() , help=(
            '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
            ''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
        ) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args , rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest )
    if parsed_args and verbose:
        print(f"""parsed the following generate kwargs: {parsed_args}""" )
    examples = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=True )
    if args.reference_path is None and Path(args.score_path ).exists():
        warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
    runtime_metrics = generate_summaries_or_translations(
        examples , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fp16=args.fp16 , task=args.task , prefix=args.prefix , **parsed_args , )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if '''translation''' in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path ).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(output_lns )]
    scores: dict = score_fn(output_lns , reference_lns )
    scores.update(runtime_metrics )
    if args.dump_args:
        scores.update(parsed_args )
    if args.info:
        scores['''info'''] = args.info
    if verbose:
        print(scores )
    if args.score_path is not None:
        json.dump(scores , open(args.score_path , '''w''' ) )
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
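    # Hedged usage sketch for summarization (mirrors the MT comment above; the
    # paths and model id are illustrative):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_generations.txt \
    #     --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --task summarization --bs 8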
| 301
| 0
|
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel
if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 65
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __snake_case ( unittest.TestCase ):
def __a ( self , __UpperCamelCase ) -> Dict:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
snake_case__ : Optional[int] = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__UpperCamelCase )
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : List[str] = 'sshleifer/tiny-gpt2'
snake_case__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : Optional[Any] = PyTorchBenchmark(__UpperCamelCase )
snake_case__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = 'sgugger/tiny-distilbert-classification'
snake_case__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , only_pretrain_model=__UpperCamelCase , )
snake_case__ : Dict = PyTorchBenchmark(__UpperCamelCase )
snake_case__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Dict = 'sshleifer/tiny-gpt2'
snake_case__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , torchscript=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : Optional[Any] = PyTorchBenchmark(__UpperCamelCase )
snake_case__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Any = 'sshleifer/tiny-gpt2'
snake_case__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , fpaa=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : Optional[int] = PyTorchBenchmark(__UpperCamelCase )
snake_case__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = 'sshleifer/tiny-gpt2'
snake_case__ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase )
# set architectures equal to `None`
snake_case__ : Union[str, Any] = None
snake_case__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : List[Any] = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
snake_case__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Optional[Any] = 'sshleifer/tiny-gpt2'
snake_case__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : str = PyTorchBenchmark(__UpperCamelCase )
snake_case__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : List[str] = 'sshleifer/tiny-gpt2'
snake_case__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__UpperCamelCase , multi_process=__UpperCamelCase , )
snake_case__ : List[Any] = PyTorchBenchmark(__UpperCamelCase )
snake_case__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Union[str, Any] = 'sshleifer/tiny-gpt2'
snake_case__ : Optional[Any] = AutoConfig.from_pretrained(__UpperCamelCase )
snake_case__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : List[str] = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
snake_case__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : List[str] = 'sshleifer/tinier_bart'
snake_case__ : List[str] = AutoConfig.from_pretrained(__UpperCamelCase )
snake_case__ : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : int = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
snake_case__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Any = 'sshleifer/tiny-gpt2'
snake_case__ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase )
snake_case__ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : int = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
snake_case__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[int] = 'sshleifer/tinier_bart'
snake_case__ : Union[str, Any] = AutoConfig.from_pretrained(__UpperCamelCase )
snake_case__ : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case__ : Union[str, Any] = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
snake_case__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Union[str, Any] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , save_to_csv=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__UpperCamelCase , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(__UpperCamelCase , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(__UpperCamelCase , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(__UpperCamelCase , 'train_time.csv' ) , env_info_csv_file=os.path.join(__UpperCamelCase , 'env.csv' ) , multi_process=__UpperCamelCase , )
snake_case__ : Union[str, Any] = PyTorchBenchmark(__UpperCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCamelCase , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , 'env.csv' ) ).exists() )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[int] = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__UpperCamelCase ):
self.assertTrue(hasattr(__UpperCamelCase , 'sequential' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'cumulative' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'current' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__UpperCamelCase , 'log.txt' ) , log_print=__UpperCamelCase , trace_memory_line_by_line=__UpperCamelCase , multi_process=__UpperCamelCase , )
snake_case__ : int = PyTorchBenchmark(__UpperCamelCase )
snake_case__ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(__UpperCamelCase , 'log.txt' ) ).exists() )
| 143
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a__ = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 15
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = ["image_processor", "tokenizer"]
UpperCAmelCase__ : str = "ViltImageProcessor"
UpperCAmelCase__ : Union[str, Any] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , _a=None , _a=None , **_a ) -> Any:
_a : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
_a : Dict = kwargs.pop('''feature_extractor''' )
_a : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
_a : int = self.image_processor
def __call__( self , _a , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding:
_a : Tuple = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel_values + pixel_mask
_a : str = self.image_processor(_a , return_tensors=_a )
encoding.update(_a )
return encoding
def __lowercase ( self , *_a , **_a ) -> Optional[Any]:
return self.tokenizer.batch_decode(*_a , **_a )
def __lowercase ( self , *_a , **_a ) -> str:
return self.tokenizer.decode(*_a , **_a )
@property
def __lowercase ( self ) -> Optional[int]:
_a : str = self.tokenizer.model_input_names
_a : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __lowercase ( self ) -> Optional[Any]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def __lowercase ( self ) -> Any:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
| 15
| 1
|
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BioGptTokenizer
snake_case_ = False
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCamelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
__lowerCamelCase = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
__lowerCamelCase = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(lowerCamelCase__ ) )
def lowercase_ ( self , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = 'lower newer'
__lowerCamelCase = 'lower newer'
return input_text, output_text
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = BioGptTokenizer(self.vocab_file , self.merges_file )
__lowerCamelCase = 'lower'
__lowerCamelCase = ['low', 'er</w>']
__lowerCamelCase = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = tokens + ['<unk>']
__lowerCamelCase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
@slow
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
__lowerCamelCase = tokenizer.encode('sequence builders' , add_special_tokens=lowerCamelCase__ )
__lowerCamelCase = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCamelCase__ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ , lowerCamelCase__ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 90
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : int = logging.get_logger(__name__)
def __snake_case ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple=False ):
lowerCamelCase_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase_ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def __snake_case ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict=False ):
for i in range(config.num_hidden_layers ):
if base_model:
lowerCamelCase_ = ""
else:
lowerCamelCase_ = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase_ = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
lowerCamelCase_ = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase_ = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase_ = in_proj_bias[: config.hidden_size]
lowerCamelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase_ = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase_ = in_proj_bias[-config.hidden_size :]
def __snake_case ( UpperCAmelCase_ : int ):
lowerCamelCase_ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase_ , UpperCAmelCase_ )
def __snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int ):
lowerCamelCase_ = dct.pop(UpperCAmelCase_ )
lowerCamelCase_ = val
def __snake_case ( ):
lowerCamelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase_ = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw )
return im
@torch.no_grad()
def __snake_case ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] ):
lowerCamelCase_ = ViTConfig()
lowerCamelCase_ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
lowerCamelCase_ = True
lowerCamelCase_ = int(vit_name[-12:-10] )
lowerCamelCase_ = int(vit_name[-9:-6] )
else:
lowerCamelCase_ = 1000
lowerCamelCase_ = "huggingface/label-files"
lowerCamelCase_ = "imagenet-1k-id2label.json"
lowerCamelCase_ = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ , repo_type="dataset" ) , "r" ) )
lowerCamelCase_ = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
lowerCamelCase_ = idalabel
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
lowerCamelCase_ = int(vit_name[-6:-4] )
lowerCamelCase_ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
lowerCamelCase_ = 192
lowerCamelCase_ = 768
lowerCamelCase_ = 12
lowerCamelCase_ = 3
elif vit_name[9:].startswith("small" ):
lowerCamelCase_ = 384
lowerCamelCase_ = 1536
lowerCamelCase_ = 12
lowerCamelCase_ = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
lowerCamelCase_ = 768
lowerCamelCase_ = 2304
lowerCamelCase_ = 8
lowerCamelCase_ = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
lowerCamelCase_ = 1024
lowerCamelCase_ = 4096
lowerCamelCase_ = 24
lowerCamelCase_ = 16
elif vit_name[4:].startswith("huge" ):
lowerCamelCase_ = 1280
lowerCamelCase_ = 5120
lowerCamelCase_ = 32
lowerCamelCase_ = 16
# load original model from timm
lowerCamelCase_ = timm.create_model(UpperCAmelCase_ , pretrained=UpperCAmelCase_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase_ = timm_model.state_dict()
if base_model:
remove_classification_head_(UpperCAmelCase_ )
lowerCamelCase_ = create_rename_keys(UpperCAmelCase_ , UpperCAmelCase_ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
read_in_q_k_v(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCamelCase_ = ViTModel(UpperCAmelCase_ ).eval()
else:
lowerCamelCase_ = ViTForImageClassification(UpperCAmelCase_ ).eval()
model.load_state_dict(UpperCAmelCase_ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
lowerCamelCase_ = DeiTImageProcessor(size=config.image_size )
else:
lowerCamelCase_ = ViTImageProcessor(size=config.image_size )
lowerCamelCase_ = image_processor(images=prepare_img() , return_tensors="pt" )
lowerCamelCase_ = encoding["pixel_values"]
lowerCamelCase_ = model(UpperCAmelCase_ )
if base_model:
lowerCamelCase_ = timm_model.forward_features(UpperCAmelCase_ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(UpperCAmelCase_ , outputs.pooler_output , atol=1E-3 )
else:
lowerCamelCase_ = timm_model(UpperCAmelCase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCAmelCase_ , outputs.logits , atol=1E-3 )
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
a_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a_ : List[str] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 55
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def lowerCamelCase ( lowerCAmelCase : list ):
"""simple docstring"""
if not postfix_notation:
return 0
__magic_name__ : Optional[int] = {'+', '-', '*', '/'}
__magic_name__ : list[Any] = []
for token in postfix_notation:
if token in operations:
__magic_name__ : List[str] = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(lowerCAmelCase ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370
|
'''simple docstring'''
import socket
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : List[str] = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
__magic_name__ : Union[str, Any] = socket.gethostname()
__magic_name__ : int = 1_2312
sock.connect((host, port) )
sock.send(b'Hello server!' )
with open('Received_file' , 'wb' ) as out_file:
print('File opened' )
print('Receiving data...' )
while True:
__magic_name__ : Optional[int] = sock.recv(1024 )
if not data:
break
out_file.write(lowerCAmelCase )
print('Successfully received the file' )
sock.close()
print('Connection closed' )
if __name__ == "__main__":
main()
| 275
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
_A : List[Any] =logging.get_logger(__name__)
_A : List[str] ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_A : Optional[Any] ={
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
_A : Optional[Any] ={
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class _lowercase ( _lowercase ):
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = ["""input_ids""", """attention_mask"""]
a = RobertaTokenizer
def __init__( self: List[str] , UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: Dict=None , UpperCamelCase__: int=None , UpperCamelCase__: List[Any]="replace" , UpperCamelCase__: List[Any]="<s>" , UpperCamelCase__: Optional[Any]="</s>" , UpperCamelCase__: str="</s>" , UpperCamelCase__: List[Any]="<s>" , UpperCamelCase__: Union[str, Any]="<unk>" , UpperCamelCase__: Dict="<pad>" , UpperCamelCase__: Any="<mask>" , UpperCamelCase__: str=False , UpperCamelCase__: List[Any]=True , **UpperCamelCase__: int , ):
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ , **UpperCamelCase__ , )
lowerCamelCase__ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase__ ) != add_prefix_space:
lowerCamelCase__ : int = getattr(UpperCamelCase__ , pre_tok_state.pop("""type""" ) )
lowerCamelCase__ : Dict = add_prefix_space
lowerCamelCase__ : Union[str, Any] = pre_tok_class(**UpperCamelCase__ )
lowerCamelCase__ : Any = add_prefix_space
lowerCamelCase__ : List[Any] = """post_processor"""
lowerCamelCase__ : Optional[Any] = getattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__ )
if tokenizer_component_instance:
lowerCamelCase__ : int = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCamelCase__ : Optional[Any] = tuple(state["""sep"""] )
if "cls" in state:
lowerCamelCase__ : List[Any] = tuple(state["""cls"""] )
lowerCamelCase__ : int = False
if state.get("""add_prefix_space""" , UpperCamelCase__ ) != add_prefix_space:
lowerCamelCase__ : Optional[Any] = add_prefix_space
lowerCamelCase__ : Any = True
if state.get("""trim_offsets""" , UpperCamelCase__ ) != trim_offsets:
lowerCamelCase__ : Optional[Any] = trim_offsets
lowerCamelCase__ : Tuple = True
if changes_to_apply:
lowerCamelCase__ : Optional[int] = getattr(UpperCamelCase__ , state.pop("""type""" ) )
lowerCamelCase__ : Any = component_class(**UpperCamelCase__ )
setattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__ )
@property
def lowerCamelCase_ ( self: Union[str, Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self: str , UpperCamelCase__: int ):
lowerCamelCase__ : int = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else value
lowerCamelCase__ : str = value
def lowerCamelCase_ ( self: Any , *UpperCamelCase__: Optional[int] , **UpperCamelCase__: Optional[int] ):
lowerCamelCase__ : List[str] = kwargs.get("""is_split_into_words""" , UpperCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase__ , **UpperCamelCase__ )
def lowerCamelCase_ ( self: Any , *UpperCamelCase__: Any , **UpperCamelCase__: Optional[Any] ):
lowerCamelCase__ : int = kwargs.get("""is_split_into_words""" , UpperCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase__ , **UpperCamelCase__ )
def lowerCamelCase_ ( self: str , UpperCamelCase__: str , UpperCamelCase__: Optional[str] = None ):
lowerCamelCase__ : Any = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict , UpperCamelCase__: Any , UpperCamelCase__: int=None ):
lowerCamelCase__ : Tuple = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[int] , UpperCamelCase__: Optional[List[int]] = None ):
lowerCamelCase__ : Optional[int] = [self.sep_token_id]
lowerCamelCase__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 41
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a ( metaclass=_lowerCamelCase ):
snake_case_ = ["transformers", "torch", "note_seq"]
def __init__( self : Union[str, Any] , *lowercase_ : Optional[int] , **lowercase_ : int ):
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def A_ ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : str ):
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def A_ ( cls : Tuple , *lowercase_ : Union[str, Any] , **lowercase_ : List[Any] ):
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 56
| 0
|
'''simple docstring'''
from __future__ import annotations
def snake_case_ ( __SCREAMING_SNAKE_CASE : list[list[int]] ):
"""simple docstring"""
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 367
|
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(__SCREAMING_SNAKE_CASE ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__("doctest").testmod()
| 264
| 0
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case_ ( __A ):
def __init__( self : Optional[Any] , lowercase_ : AutoencoderKL , lowercase_ : CLIPTextModel , lowercase_ : CLIPTokenizer , lowercase_ : UNetaDConditionModel , lowercase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowercase_ : StableDiffusionSafetyChecker , lowercase_ : CLIPImageProcessor , ) -> Optional[int]:
super().__init__()
self.register_modules(
vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ , feature_extractor=lowercase_ , )
def __UpperCamelCase ( self : Tuple , lowercase_ : Optional[Union[str, int]] = "auto" ) -> Optional[int]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase__ : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_ )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
self.enable_attention_slicing(lowercase_ )
@torch.no_grad()
def __call__( self : int , lowercase_ : Union[str, List[str]] , lowercase_ : int = 5_12 , lowercase_ : int = 5_12 , lowercase_ : int = 50 , lowercase_ : float = 7.5 , lowercase_ : Optional[Union[str, List[str]]] = None , lowercase_ : Optional[int] = 1 , lowercase_ : float = 0.0 , lowercase_ : Optional[torch.Generator] = None , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , lowercase_ : Optional[torch.FloatTensor] = None , **lowercase_ : int , ) -> List[Any]:
if isinstance(lowercase_ , lowercase_ ):
lowercase__ : Optional[int] = 1
elif isinstance(lowercase_ , lowercase_ ):
lowercase__ : List[Any] = len(lowercase_ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(lowercase_ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(lowercase_ )}.''' )
# get prompt text embeddings
lowercase__ : List[str] = self.tokenizer(
lowercase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
lowercase__ : str = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase__ : Any = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowercase__ : str = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowercase__ : str = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase__ , lowercase__ , lowercase__ : List[Any] = text_embeddings.shape
lowercase__ : Union[str, Any] = text_embeddings.repeat(1 , lowercase_ , 1 )
lowercase__ : Dict = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ : Tuple = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ : List[str]
if negative_prompt is None:
lowercase__ : int = [""]
elif type(lowercase_ ) is not type(lowercase_ ):
raise TypeError(
F'''`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_ )} !='''
F''' {type(lowercase_ )}.''' )
elif isinstance(lowercase_ , lowercase_ ):
lowercase__ : str = [negative_prompt]
elif batch_size != len(lowercase_ ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(lowercase_ )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
" the batch size of `prompt`." )
else:
lowercase__ : Union[str, Any] = negative_prompt
lowercase__ : List[Any] = text_input_ids.shape[-1]
lowercase__ : Any = self.tokenizer(
lowercase_ , padding="max_length" , max_length=lowercase_ , truncation=lowercase_ , return_tensors="pt" , )
lowercase__ : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__ : Union[str, Any] = uncond_embeddings.shape[1]
lowercase__ : str = uncond_embeddings.repeat(lowercase_ , lowercase_ , 1 )
lowercase__ : Optional[int] = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ : Optional[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__ : int = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ : Optional[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowercase__ : Union[str, Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase__ : Tuple = torch.randn(
lowercase_ , generator=lowercase_ , device="cpu" , dtype=lowercase_ ).to(self.device )
lowercase__ : Union[str, Any] = torch.randn(lowercase_ , generator=lowercase_ , device="cpu" , dtype=lowercase_ ).to(
self.device )
else:
lowercase__ : Any = torch.randn(
lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ )
lowercase__ : Tuple = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowercase__ : Dict = latents_reference.to(self.device )
lowercase__ : str = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowercase__ : Union[str, Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
lowercase__ : str = (latents_shape[2] - latents_shape_reference[2]) // 2
lowercase__ : List[Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowercase__ : int = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowercase__ : Any = 0 if dx < 0 else dx
lowercase__ : Optional[Any] = 0 if dy < 0 else dy
lowercase__ : List[Any] = max(-dx , 0 )
lowercase__ : str = max(-dy , 0 )
# import pdb
# pdb.set_trace()
lowercase__ : Any = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(lowercase_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase__ : int = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ : Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ : Tuple = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ : int = {}
if accepts_eta:
lowercase__ : List[Any] = eta
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ : Any = self.scheduler.scale_model_input(lowercase_ , lowercase_ )
# predict the noise residual
lowercase__ : Optional[int] = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_ ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase__ , lowercase__ : List[str] = noise_pred.chunk(2 )
lowercase__ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase__ : Optional[int] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ , lowercase_ )
lowercase__ : int = 1 / 0.1_82_15 * latents
lowercase__ : Dict = self.vae.decode(lowercase_ ).sample
lowercase__ : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
lowercase__ : List[str] = self.feature_extractor(self.numpy_to_pil(lowercase_ ) , return_tensors="pt" ).to(
self.device )
lowercase__ , lowercase__ : int = self.safety_checker(
images=lowercase_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowercase__ : List[str] = None
if output_type == "pil":
lowercase__ : List[str] = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_ )
| 87
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _SCREAMING_SNAKE_CASE ( *SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=2 ):
from .. import __version__
A_ : Union[str, Any] = take_from
A_ : Optional[Any] = ()
if not isinstance(args[0] , SCREAMING_SNAKE_CASE ):
A_ : List[str] = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(SCREAMING_SNAKE_CASE ).base_version ) >= version.parse(SCREAMING_SNAKE_CASE ):
raise ValueError(
f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
f''' version {__version__} is >= {version_name}''' )
A_ : List[Any] = None
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(SCREAMING_SNAKE_CASE ),)
A_ : Optional[Any] = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
values += (getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),)
A_ : int = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
A_ : List[Any] = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
A_ : Union[str, Any] = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , SCREAMING_SNAKE_CASE , stacklevel=SCREAMING_SNAKE_CASE )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) > 0:
A_ : Dict = inspect.getouterframes(inspect.currentframe() )[1]
A_ : Optional[int] = call_frame.filename
A_ : Optional[int] = call_frame.lineno
A_ : str = call_frame.function
A_ , A_ : List[str] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(SCREAMING_SNAKE_CASE ) == 0:
return
elif len(SCREAMING_SNAKE_CASE ) == 1:
return values[0]
return values
| 186
| 0
|
from ....utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any]=None , UpperCamelCase : List[str]=20_48 ):
'''simple docstring'''
_snake_case : Union[str, Any] = config.__dict__
_snake_case : Optional[int] = modal_hidden_size
if num_labels:
_snake_case : Tuple = num_labels
| 260
|
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowerCAmelCase_ = open # noqa: we just need to have a builtin inside this module to test it properly
| 260
| 1
|
'''simple docstring'''
import qiskit
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> qiskit.result.counts.Counts:
UpperCAmelCase__ : List[Any] = qiskit.Aer.get_backend('''aer_simulator''' )
UpperCAmelCase__ : List[Any] = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
UpperCAmelCase__ : str = qiskit.execute(lowerCAmelCase__ , lowerCAmelCase__ , shots=10_00 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(lowerCAmelCase__ )
if __name__ == "__main__":
UpperCamelCase__ = half_adder(1, 1)
print(F"""Half Adder Output Qubit Counts: {counts}""")
| 181
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase__ = {
'''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig''']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''RemBertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''RemBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 181
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase ) -> int:
assert column_title.isupper()
lowercase__ : Tuple = 0
lowercase__ : int = len(lowerCamelCase_ ) - 1
lowercase__ : Any = 0
while index >= 0:
lowercase__ : Dict = (ord(column_title[index] ) - 64) * pow(26 , lowerCamelCase_ )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 368
|
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase_ = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = "maskformer"
lowerCAmelCase : Any = {"hidden_size": "mask_feature_size"}
lowerCAmelCase : Optional[int] = ["resnet", "swin"]
lowerCAmelCase : str = ["detr"]
def __init__( self : int ,_snake_case : int = 256 ,_snake_case : int = 256 ,_snake_case : float = 0.1 ,_snake_case : bool = False ,_snake_case : Optional[Dict] = None ,_snake_case : Optional[Dict] = None ,_snake_case : float = 0.02 ,_snake_case : float = 1.0 ,_snake_case : float = 1.0 ,_snake_case : float = 1.0 ,_snake_case : float = 20.0 ,_snake_case : Optional[bool] = None ,**_snake_case : Optional[Any] ,) -> Dict:
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
lowercase__ : Any = SwinConfig(
image_size=384 ,in_channels=3 ,patch_size=4 ,embed_dim=128 ,depths=[2, 2, 18, 2] ,num_heads=[4, 8, 16, 32] ,window_size=12 ,drop_path_rate=0.3 ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ,)
if isinstance(_snake_case ,_snake_case ):
lowercase__ : List[str] = backbone_config.pop('''model_type''' )
lowercase__ : List[Any] = CONFIG_MAPPING[backbone_model_type]
lowercase__ : str = config_class.from_dict(_snake_case )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
lowercase__ : Union[str, Any] = DetrConfig()
else:
# verify that the decoder is supported
lowercase__ : Tuple = (
decoder_config.pop('''model_type''' ) if isinstance(_snake_case ,_snake_case ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f"""Transformer Decoder {decoder_type} not supported, please use one of"""
f""" {",".join(self.decoders_supported )}""" )
if isinstance(_snake_case ,_snake_case ):
lowercase__ : Optional[int] = CONFIG_MAPPING[decoder_type]
lowercase__ : Optional[Any] = config_class.from_dict(_snake_case )
lowercase__ : List[Any] = backbone_config
lowercase__ : List[Any] = decoder_config
# main feature dimension for the model
lowercase__ : List[str] = fpn_feature_size
lowercase__ : int = mask_feature_size
# initializer
lowercase__ : str = init_std
lowercase__ : str = init_xavier_std
# Hungarian matcher && loss
lowercase__ : Optional[int] = cross_entropy_weight
lowercase__ : List[Any] = dice_weight
lowercase__ : List[str] = mask_weight
lowercase__ : str = use_auxiliary_loss
lowercase__ : Optional[int] = no_object_weight
lowercase__ : Optional[Any] = output_auxiliary_logits
lowercase__ : Optional[Any] = self.decoder_config.encoder_attention_heads
lowercase__ : Optional[Any] = self.decoder_config.num_hidden_layers
super().__init__(**_snake_case )
@classmethod
def UpperCAmelCase ( cls : Any ,_snake_case : PretrainedConfig ,_snake_case : PretrainedConfig ,**_snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return cls(
backbone_config=_snake_case ,decoder_config=_snake_case ,**_snake_case ,)
def UpperCAmelCase ( self : str ) -> Dict[str, any]:
"""simple docstring"""
lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase__ : int = self.backbone_config.to_dict()
lowercase__ : List[Any] = self.decoder_config.to_dict()
lowercase__ : List[str] = self.__class__.model_type
return output
| 302
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
UpperCamelCase__ = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    """Wraps the attributes and features of a model that is to be loaded with `bitsandbytes` 8-bit or 4-bit quantization."""
    def __init__(self, load_in_8bit=False, load_in_4bit=False, llm_int8_threshold=6.0, llm_int8_skip_modules=None, llm_int8_enable_fp32_cpu_offload=False, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=None, bnb_4bit_quant_type="fp4", bnb_4bit_use_double_quant=False, **kwargs):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")
        self.post_init()
    def post_init(self):
        """Safety checker that the arguments passed are valid."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")
        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version")
    def is_quantizable(self):
        """Returns True if the model is quantizable, False otherwise."""
        return self.load_in_8bit or self.load_in_4bit
    def quantization_method(self):
        """Returns the quantization method used, or None if the model is not quantizable."""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output
    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"
    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
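# A minimal usage sketch (not from this file; assumes torch and a recent
# bitsandbytes install, and that the dtype/quant-type values below are desired):
#   quantization_config = BitsAndBytesConfig(
#       load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype="bfloat16"
#   )
#   quantization_config.quantization_method()    # -> "nf4"
#   print(quantization_config.to_json_string())  # serializes only non-default values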
| 181
|
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs, )
    def read(self):
        # Build streaming dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs
    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead.")
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written
    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the pyarrow table as JSON to a binary file handle. Caller is responsible for opening and closing the handle."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format", ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format", ):
                    written += file_obj.write(json_str)
        return written
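# End-to-end sketch through the public `datasets` API, which delegates to the
# classes above (the output path is illustrative):
#   from datasets import Dataset
#   ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#   ds.to_json("out.jsonl", lines=True)  # runs JsonDatasetWriter.write() under the hood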
| 181
| 1
|
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port))
    sock.send(b"Hello server!")
    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)
    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
    main()
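# For context, a minimal matching server sketch (an assumption -- the server is
# not part of this file; the served filename and chunk size are illustrative):
#   import socket
#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   server.bind((socket.gethostname(), 12312))
#   server.listen(1)
#   conn, _addr = server.accept()
#   conn.recv(1024)  # consume the client's greeting
#   with open("file_to_send", "rb") as f:
#       while chunk := f.read(1024):
#           conn.send(chunk)
#   conn.close()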
| 45
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 45
| 1
|
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def lowercase ( A_="ro" , A_="en" , A_="wmt16" , A_=None )-> None:
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("run pip install datasets" )
a : List[Any] = F'''{src_lang}-{tgt_lang}'''
print(F'''Converting {dataset}-{pair}''' )
a : Tuple = datasets.load_dataset(A_ , A_ )
if save_dir is None:
a : Dict = F'''{dataset}-{pair}'''
a : str = Path(A_ )
save_dir.mkdir(exist_ok=A_ )
for split in ds.keys():
print(F'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
a : Any = "val" if split == "validation" else split
a : Tuple = save_dir.joinpath(F'''{fn}.source''' )
a : Any = save_dir.joinpath(F'''{fn}.target''' )
a : Tuple = src_path.open("w+" )
a : List[Any] = tgt_path.open("w+" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
a : Any = x["translation"]
src_fp.write(ex[src_lang] + "\n" )
tgt_fp.write(ex[tgt_lang] + "\n" )
print(F'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
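# Example invocation (illustrative script name; fire maps each keyword argument
# to a CLI flag of the same name):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en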
| 40
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict) -> None:
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict) -> None:
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path) -> None:
    fairseq_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = fairseq_checkpoint["args"]
    state_dict = fairseq_checkpoint["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu", num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=200, use_cache=True, decoder_start_token_id=2, early_stopping=True, )
    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
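# Example invocation (script name and paths are illustrative):
#   python convert_s2t_checkpoint.py --fairseq_path checkpoints/s2t.pt --pytorch_dump_folder_path ./s2t-converted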
| 56
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False, )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())
    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False, )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 182
|
"""simple docstring"""
def gnome_sort(lst):
    """Sort a list in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
| 182
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTBigCodeForSequenceClassification',
'GPTBigCodeForTokenClassification',
'GPTBigCodeForCausalLM',
'GPTBigCodeModel',
'GPTBigCodePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
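# Runtime behavior sketch (assumes this module ships inside transformers):
#   from transformers.models.gpt_bigcode import GPTBigCodeModel
# _LazyModule defers the heavy import: modeling_gpt_bigcode is only imported the
# first time an attribute listed in _import_structure is actually accessed.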
| 15
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    """Configuration class to store the configuration of a YOLOS model."""
    model_type = "yolos"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    @property
    def default_onnx_opset(self) -> int:
        return 12
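# Minimal usage sketch (assumes this module is importable from transformers;
# the save path is illustrative):
#   config = YolosConfig(num_detection_tokens=100, image_size=[512, 864])
#   config.save_pretrained("./yolos-config")
#   config = YolosConfig.from_pretrained("./yolos-config")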
| 15
| 1
|
"""simple docstring"""
from math import ceil
def assert_device_map(device_map, num_blocks):
    """Checks that a device_map covers every attention block exactly once."""
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks))
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks))
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks))
def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
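# Worked example (illustrative device ids): 12 layers over 2 devices gives
# ceil(12 / 2) = 6 layers per device.
if __name__ == "__main__":
    device_map = get_device_map(12, [0, 1])
    print(device_map)  # {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}
    assert_device_map(device_map, 12)  # passes: no duplicates, nothing missing or extra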
| 57
|
"""simple docstring"""
deps = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
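# How a pin table like this is typically consumed (a sketch; `deps_list` is a
# hypothetical helper, not defined in this file):
#   def deps_list(*pkgs):
#       return [deps[pkg] for pkg in pkgs]
#   deps_list("torch", "numpy")  # -> ["torch>=1.4", "numpy"]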
| 57
| 1
|
'''simple docstring'''
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """
    Find, in place, the minimum cost of a path from the top-left to the
    bottom-right of a grid, moving only right or down.
    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 250
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    """Fine-tune and evaluate a multiple-choice model."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome.")
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        results.update(result)
    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
| 161
| 0
|
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial, given its coefficients from lowest to highest degree, at the point x."""
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method: one multiplication and one addition per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
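# Worked example of Horner's rewriting for poly = (0.0, 0.0, 5.0, 9.3, 7.0):
#   5x^2 + 9.3x^3 + 7x^4 == ((((7)x + 9.3)x + 5)x + 0)x + 0
# so evaluation needs one multiply-add per coefficient, while the naive sum
# recomputes a fresh power of x for every term.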
if __name__ == "__main__":
lowerCAmelCase__ = (0.0, 0.0, 5.0, 9.3, 7.0)
lowerCAmelCase__ = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 350
|
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")
def get_parent_position(position: int) -> int:
    return (position - 1) // 2
def get_child_left_position(position: int) -> int:
    return (2 * position) + 1
def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0
    def __len__(self) -> int:
        return self.elements
    def __repr__(self) -> str:
        return str(self.heap)
    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0
    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)
    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem
    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)
    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None
    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None
    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0
    def __repr__(self) -> str:
        return str(self.connections)
    def __len__(self) -> int:
        return self.nodes
    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1
    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(graph: GraphUndirectedWeighted[T]) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
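if __name__ == "__main__":
    # Small illustrative run (edge weights chosen arbitrarily): build an
    # undirected weighted graph and compute distances/parents with prims_algo.
    graph = GraphUndirectedWeighted()
    graph.add_edge("a", "b", 3)
    graph.add_edge("b", "c", 10)
    graph.add_edge("a", "c", 15)
    graph.add_edge("b", "d", 100)
    dist, parent = prims_algo(graph)
    print(dist)    # {'a': 0, 'b': 3, 'c': 13, 'd': 103}
    print(parent)  # {'a': None, 'b': 'a', 'c': 'b', 'd': 'b'}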
| 244
| 0
|
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ))
    temp_ls = temp_rs
    temp_lr = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ))
        temp_ls = temp_rs
        temp_lr = temp_rr
    process_array_.append(
        Process(
            target=oe_process, args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ), ))
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
    main()
| 265
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits
    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 265
| 1
|
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
lowerCAmelCase = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=8 ):
"""simple docstring"""
lowercase__ = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase__ = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class _a ( UpperCamelCase__ ):
def __init__( self: Optional[Any] , UpperCamelCase_: UNetaDConditionModel , UpperCamelCase_: DDPMScheduler , UpperCamelCase_: VQModel , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
self.register_modules(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , movq=UpperCamelCase_ , )
lowercase__ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: int , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] ) -> List[Any]:
"""simple docstring"""
if latents is None:
lowercase__ = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowercase__ = latents.to(UpperCamelCase_ )
lowercase__ = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self: int , UpperCamelCase_: List[str]=0 ) -> Tuple:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowercase__ = torch.device(f'cuda:{gpu_id}' )
lowercase__ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: List[str]=0 ) -> Dict:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
lowercase__ = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=UpperCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase__ = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase__ , lowercase__ = cpu_offload_with_hook(UpperCamelCase_ , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ )
# We'll offload the last model manually.
lowercase__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase_ ( self: Any ) -> Dict:
"""simple docstring"""
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self: List[str] , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int = 512 , UpperCamelCase_: int = 512 , UpperCamelCase_: int = 100 , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ) -> str:
"""simple docstring"""
lowercase__ = self._execution_device
lowercase__ = guidance_scale > 1.0
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase__ = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase__ = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase__ = torch.cat(UpperCamelCase_ , dim=0 )
lowercase__ = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
lowercase__ = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
lowercase__ = negative_image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
lowercase__ = hint.repeat_interleave(UpperCamelCase_ , dim=0 )
lowercase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
lowercase__ = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
lowercase__ = self.scheduler.timesteps
lowercase__ = self.movq.config.latent_channels
lowercase__ , lowercase__ = downscale_height_and_width(UpperCamelCase_ , UpperCamelCase_ , self.movq_scale_factor )
# create initial latent
lowercase__ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = {'''image_embeds''': image_embeds, '''hint''': hint}
lowercase__ = self.unet(
sample=UpperCamelCase_ , timestep=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , added_cond_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.split(latents.shape[1] , dim=1 )
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ , lowercase__ = variance_pred.chunk(2 )
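                # classifier-free guidance: shift the unconditional prediction toward the conditioned one by guidance_scale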
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase__ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase__ , lowercase__ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ , )[0]
# post-processing
lowercase__ = self.movq.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}' )
if output_type in ["np", "pil"]:
lowercase__ = image * 0.5 + 0.5
lowercase__ = image.clamp(0 , 1 )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
| 362
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class _a ( UpperCamelCase__ ):
_lowercase : int = '''git_vision_model'''
def __init__( self: Tuple , UpperCamelCase_: Optional[int]=768 , UpperCamelCase_: Optional[int]=3_072 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: Union[str, Any]=12 , UpperCamelCase_: Any=3 , UpperCamelCase_: str=224 , UpperCamelCase_: int=16 , UpperCamelCase_: Any="quick_gelu" , UpperCamelCase_: Union[str, Any]=1E-5 , UpperCamelCase_: Tuple=0.0 , UpperCamelCase_: Tuple=0.02 , **UpperCamelCase_: str , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**UpperCamelCase_ )
lowercase__ = hidden_size
lowercase__ = intermediate_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = image_size
lowercase__ = initializer_range
lowercase__ = attention_dropout
lowercase__ = layer_norm_eps
lowercase__ = hidden_act
@classmethod
def lowerCamelCase_ ( cls: Optional[Any] , UpperCamelCase_: Union[str, os.PathLike] , **UpperCamelCase_: List[Any] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(UpperCamelCase_ )
lowercase__ , lowercase__ = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('''model_type''' ) == "git":
lowercase__ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class _a ( UpperCamelCase__ ):
_lowercase : Optional[int] = '''git'''
def __init__( self: Optional[Any] , UpperCamelCase_: Optional[Any]=None , UpperCamelCase_: List[str]=30_522 , UpperCamelCase_: List[Any]=768 , UpperCamelCase_: List[Any]=6 , UpperCamelCase_: Union[str, Any]=12 , UpperCamelCase_: Any=3_072 , UpperCamelCase_: Any="gelu" , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: str=1_024 , UpperCamelCase_: Any=0.02 , UpperCamelCase_: str=1E-1_2 , UpperCamelCase_: Optional[int]=0 , UpperCamelCase_: Optional[Any]="absolute" , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: List[Any]=False , UpperCamelCase_: List[Any]=101 , UpperCamelCase_: int=102 , UpperCamelCase_: List[str]=None , **UpperCamelCase_: int , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
if vision_config is None:
lowercase__ = {}
logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''' )
lowercase__ = GitVisionConfig(**UpperCamelCase_ )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = use_cache
lowercase__ = tie_word_embeddings
lowercase__ = num_image_with_embedding
lowercase__ = bos_token_id
lowercase__ = eos_token_id
def lowerCamelCase_ ( self: Dict ) -> Tuple:
"""simple docstring"""
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.vision_config.to_dict()
lowercase__ = self.__class__.model_type
return output
| 93
| 0
|
'''simple docstring'''
_snake_case = 8.3_1_4_4_5_9_8
def rms_speed_of_molecule ( temperature , molar_mass ) -> float:
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
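# Worked example: v_rms = sqrt(3 * R * T / M). For N2 at T = 300 K with
# M = 0.028 kg/mol, sqrt(3 * 8.3144598 * 300 / 0.028) ≈ 517 m/s.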
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 0.028  # molar mass of nitrogen gas (N2) in kg/mol, matching the function's units
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 250
|
'''simple docstring'''
_snake_case = 8.3_1_4_4_5_9_8
def rms_speed_of_molecule ( temperature , molar_mass ) -> float:
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 0.028  # molar mass of nitrogen gas (N2) in kg/mol, matching the function's units
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 250
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = '''▁'''
__a = {'''vocab_file''': '''spiece.model'''}
__a = {
'''vocab_file''': {
'''google/reformer-crime-and-punishment''': (
'''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'''
)
}
}
__a = {
'''google/reformer-crime-and-punishment''': 52_42_88,
}
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Dict = VOCAB_FILES_NAMES
A : Any = PRETRAINED_VOCAB_FILES_MAP
A : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[Any] = ['input_ids', 'attention_mask']
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__=[] , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
lowercase : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
lowercase : Optional[Any] = vocab_file
lowercase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE__ )
@property
def __lowerCamelCase ( self ):
return self.sp_model.get_piece_size()
def __lowerCamelCase ( self ):
lowercase : int = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
lowercase : List[Any] = self.__dict__.copy()
lowercase : str = None
return state
def __setstate__( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase : Union[str, Any] = {}
lowercase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
if index < self.sp_model.get_piece_size():
lowercase : Optional[int] = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
return token
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[int] = []
lowercase : Dict = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token
lowercase : Any = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__ )
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ )
return out_string.strip()
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
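        # copy the existing sentencepiece model file when possible; otherwise serialize the in-memory model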
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase : Any = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , '''wb''' ) as fi:
lowercase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
| 356
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {'''vocab_file''': '''spiece.model'''}
__a = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
__a = {
'''albert-base-v1''': 5_12,
'''albert-large-v1''': 5_12,
'''albert-xlarge-v1''': 5_12,
'''albert-xxlarge-v1''': 5_12,
'''albert-base-v2''': 5_12,
'''albert-large-v2''': 5_12,
'''albert-xlarge-v2''': 5_12,
'''albert-xxlarge-v2''': 5_12,
}
__a = '''▁'''
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Union[str, Any] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="[CLS]" , SCREAMING_SNAKE_CASE__="[SEP]" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="[SEP]" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="[CLS]" , SCREAMING_SNAKE_CASE__="[MASK]" , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
lowercase : Optional[Any] = (
AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ , normalized=SCREAMING_SNAKE_CASE__ )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else mask_token
)
lowercase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE__ , remove_space=SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
lowercase : List[str] = do_lower_case
lowercase : Tuple = remove_space
lowercase : Tuple = keep_accents
lowercase : str = vocab_file
lowercase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE__ )
@property
def __lowerCamelCase ( self ):
return len(self.sp_model )
def __lowerCamelCase ( self ):
lowercase : Optional[int] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
lowercase : List[Any] = self.__dict__.copy()
lowercase : Optional[Any] = None
return state
def __setstate__( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase : Any = {}
lowercase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
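        # normalize whitespace, quote characters, accents and case before sentencepiece tokenization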
if self.remove_space:
lowercase : int = ''' '''.join(inputs.strip().split() )
else:
lowercase : List[Any] = inputs
lowercase : int = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
lowercase : Optional[Any] = unicodedata.normalize('''NFKD''' , SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = ''''''.join([c for c in outputs if not unicodedata.combining(SCREAMING_SNAKE_CASE__ )] )
if self.do_lower_case:
lowercase : Union[str, Any] = outputs.lower()
return outputs
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = self.preprocess_text(SCREAMING_SNAKE_CASE__ )
lowercase : Any = self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
lowercase : Any = []
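        # sentencepiece can glue a trailing comma onto digits; re-split such pieces so numbers tokenize consistently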
for piece in pieces:
if len(SCREAMING_SNAKE_CASE__ ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
lowercase : Any = self.sp_model.EncodeAsPieces(piece[:-1].replace(SCREAMING_SNAKE_CASE__ , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowercase : Optional[int] = cur_pieces[1:]
else:
lowercase : Union[str, Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(SCREAMING_SNAKE_CASE__ )
else:
new_pieces.append(SCREAMING_SNAKE_CASE__ )
return new_pieces
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : str = []
lowercase : Tuple = ''''''
lowercase : Union[str, Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token
lowercase : Union[str, Any] = True
lowercase : int = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ )
return out_string.strip()
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
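        # ALBERT format: [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair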
lowercase : Optional[Any] = [self.sep_token_id]
lowercase : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is not None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : Optional[int] = [self.sep_token_id]
lowercase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase : Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , '''wb''' ) as fi:
lowercase : Dict = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
| 173
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
snake_case_ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
snake_case_ = DDPMScheduler()
snake_case_ = AudioDiffusionPipeline(vqvae=a__ , unet=self.dummy_unet , mel=a__ , scheduler=a__ )
snake_case_ = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
snake_case_ = torch.Generator(device=a__ ).manual_seed(42 )
snake_case_ = pipe(generator=a__ , steps=4 )
snake_case_ = output.audios[0]
snake_case_ = output.images[0]
snake_case_ = torch.Generator(device=a__ ).manual_seed(42 )
snake_case_ = pipe(generator=a__ , steps=4 , return_dict=a__ )
snake_case_ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
snake_case_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
snake_case_ = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
snake_case_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
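        # second scenario: encode raw audio with the VQ-VAE and resume denoising from an intermediate step (start_step=5)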
snake_case_ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
snake_case_ = DDIMScheduler()
snake_case_ = self.dummy_vqvae_and_unet
snake_case_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=a__ , scheduler=a__ )
snake_case_ = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
np.random.seed(0 )
snake_case_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
snake_case_ = torch.Generator(device=a__ ).manual_seed(42 )
snake_case_ = pipe(raw_audio=a__ , generator=a__ , start_step=5 , steps=10 )
snake_case_ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
snake_case_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
snake_case_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
snake_case_ = self.dummy_unet_condition
snake_case_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=a__ , mel=a__ , scheduler=a__ )
snake_case_ = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
np.random.seed(0 )
snake_case_ = torch.rand((1, 1, 10) )
snake_case_ = pipe(generator=a__ , encoding=a__ )
snake_case_ = output.images[0]
snake_case_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
snake_case_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = torch_device
snake_case_ = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
snake_case_ = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
snake_case_ = torch.Generator(device=a__ ).manual_seed(42 )
snake_case_ = pipe(generator=a__ )
snake_case_ = output.audios[0]
snake_case_ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
snake_case_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
snake_case_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 85
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 302
| 0
|
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowercase_ = logging.get_logger(__name__)
class _snake_case ( _lowerCAmelCase):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 370
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def get_config( model_name ):
lowercase__ = "huggingface/label-files"
lowercase__ = "imagenet-1k-id2label.json"
lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
lowercase__ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
lowercase__ = {v: k for k, v in idalabel.items()}
lowercase__ = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
lowercase__ = BitConfig(
conv_layer=SCREAMING_SNAKE_CASE_ , num_labels=1000 , idalabel=SCREAMING_SNAKE_CASE_ , labelaid=SCREAMING_SNAKE_CASE_ , )
return config
def rename_key( name ):
if "stem.conv" in name:
lowercase__ = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
lowercase__ = name.replace("blocks" , "layers" )
if "head.fc" in name:
lowercase__ = name.replace("head.fc" , "classifier.1" )
if name.startswith("norm" ):
lowercase__ = "bit." + name
if "bit" not in name and "classifier" not in name:
lowercase__ = "bit.encoder." + name
return name
def prepare_img( ):
lowercase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def convert_bit_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
lowercase__ = get_config(SCREAMING_SNAKE_CASE_ )
# load original model from timm
lowercase__ = create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ )
timm_model.eval()
# load state_dict of original model
lowercase__ = timm_model.state_dict()
for key in state_dict.copy().keys():
lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ )
lowercase__ = val.squeeze() if "head" in key else val
# load HuggingFace model
lowercase__ = BitForImageClassification(SCREAMING_SNAKE_CASE_ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# create image processor
lowercase__ = create_transform(**resolve_data_config({} , model=SCREAMING_SNAKE_CASE_ ) )
lowercase__ = transform.transforms
lowercase__ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
lowercase__ = BitImageProcessor(
do_resize=SCREAMING_SNAKE_CASE_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=SCREAMING_SNAKE_CASE_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=SCREAMING_SNAKE_CASE_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowercase__ = prepare_img()
lowercase__ = transform(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 )
lowercase__ = processor(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# verify logits
with torch.no_grad():
lowercase__ = model(SCREAMING_SNAKE_CASE_ )
lowercase__ = outputs.logits
print("Logits:" , logits[0, :3] )
print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
lowercase__ = timm_model(SCREAMING_SNAKE_CASE_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
lowercase_ = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 224
| 0
|
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
__magic_name__ = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def _lowerCAmelCase ( UpperCamelCase_ = "dhaka" , UpperCamelCase_ = 5 ):
__SCREAMING_SNAKE_CASE = min(UpperCamelCase_ , 50 ) # Prevent abuse!
__SCREAMING_SNAKE_CASE = {
"""q""": query,
"""tbm""": """isch""",
"""hl""": """en""",
"""ijn""": """0""",
}
__SCREAMING_SNAKE_CASE = requests.get("""https://www.google.com/search""" , params=UpperCamelCase_ , headers=UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = BeautifulSoup(html.text , """html.parser""" )
__SCREAMING_SNAKE_CASE = """""".join(
re.findall(r"""AF_initDataCallback\(([^<]+)\);""" , str(soup.select("""script""" ) ) ) )
__SCREAMING_SNAKE_CASE = json.dumps(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = json.loads(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = re.findall(
r"""\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",""" , UpperCamelCase_ , )
if not matched_google_image_data:
return 0
__SCREAMING_SNAKE_CASE = re.sub(
r"""\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]""" , """""" , str(UpperCamelCase_ ) , )
__SCREAMING_SNAKE_CASE = re.findall(
r"""(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]""" , UpperCamelCase_ , )
for index, fixed_full_res_image in enumerate(UpperCamelCase_ ):
if index >= max_images:
return index
__SCREAMING_SNAKE_CASE = bytes(UpperCamelCase_ , """ascii""" ).decode(
"""unicode-escape""" )
__SCREAMING_SNAKE_CASE = bytes(UpperCamelCase_ , """ascii""" ).decode(
"""unicode-escape""" )
__SCREAMING_SNAKE_CASE = urllib.request.build_opener()
__SCREAMING_SNAKE_CASE = [
(
"""User-Agent""",
"""Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
""" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582""",
)
]
urllib.request.install_opener(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = f"query_{query.replace(' ' , '_' )}"
if not os.path.exists(UpperCamelCase_ ):
os.makedirs(UpperCamelCase_ )
urllib.request.urlretrieve( # noqa: S310
UpperCamelCase_ , f"{path_name}/original_size_img_{index}.jpg" )
return index
if __name__ == "__main__":
try:
__magic_name__ = download_images_from_google_query(sys.argv[1])
print(F"""{image_count} images were downloaded to disk.""")
except IndexError:
print("Please provide a search term.")
raise
| 100
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a__ : Any = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE)
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
def __init__( self :List[str] , **_A :Any ) -> Tuple:
'''simple docstring'''
super().__init__(**_A )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self :Any , _A :Union[str, List[str], "Image", List["Image"]] , **_A :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return super().__call__(_A , **_A )
def lowercase_ ( self :Optional[int] , **_A :Dict ) -> Optional[Any]:
'''simple docstring'''
__A = {}
if "candidate_labels" in kwargs:
__A = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
__A = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def lowercase_ ( self :Optional[int] , _A :str , _A :str=None , _A :Tuple="This is a photo of {}." ) -> Optional[int]:
'''simple docstring'''
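        # render one hypothesis sentence per candidate label and encode them alongside the image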
__A = load_image(_A )
__A = self.image_processor(images=[image] , return_tensors=self.framework )
__A = candidate_labels
        __A = [hypothesis_template.format(x ) for x in candidate_labels]
__A = self.tokenizer(_A , return_tensors=self.framework , padding=_A )
__A = [text_inputs]
return inputs
def lowercase_ ( self :List[str] , _A :Tuple ) -> Tuple:
'''simple docstring'''
__A = model_inputs.pop('candidate_labels' )
__A = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , _A ):
__A = text_inputs[0]
else:
# Batching case.
__A = text_inputs[0][0]
__A = self.model(**_A , **_A )
__A = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def lowercase_ ( self :List[str] , _A :Optional[int] ) -> Dict:
'''simple docstring'''
__A = model_outputs.pop('candidate_labels' )
__A = model_outputs['logits'][0]
if self.framework == "pt":
__A = logits.softmax(dim=-1 ).squeeze(-1 )
__A = probs.tolist()
if not isinstance(_A , _A ):
__A = [scores]
elif self.framework == "tf":
__A = stable_softmax(_A , axis=-1 )
__A = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__A = [
{'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(_A , _A ) , key=lambda x : -x[0] )
]
return result
| 161
| 0
|
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance( inductance , frequency , reactance ):
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 365
|
def solution( max_perimeter = 10**9 ):
    # Project Euler 94: sum the perimeters of almost-equilateral triangles
    # (sides a, a, a±1 with integral area) whose perimeter is at most max_perimeter.
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
| 127
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( _a , _a , _a , unittest.TestCase ):
_a = StableDiffusionInpaintPipeline
_a = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_a = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_a = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_a = frozenset([] )
def __lowercase ( self : Dict ):
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowerCAmelCase , )
lowerCAmelCase = PNDMScheduler(skip_prk_steps=lowerCAmelCase )
torch.manual_seed(0 )
lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
lowerCAmelCase = CLIPTextModel(lowerCAmelCase )
lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __lowercase ( self : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase = Image.fromarray(np.uinta(lowerCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
lowerCAmelCase = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(lowerCAmelCase ).startswith("""mps""" ):
lowerCAmelCase = torch.manual_seed(lowerCAmelCase )
else:
lowerCAmelCase = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def __lowercase ( self : Any ):
lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = StableDiffusionInpaintPipeline(**lowerCAmelCase )
lowerCAmelCase = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase )
lowerCAmelCase = sd_pipe(**lowerCAmelCase ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase ( self : Optional[int] ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowercase ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : Optional[int] ):
lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
lowerCAmelCase = """stabilityai/stable-diffusion-2-inpainting"""
lowerCAmelCase = StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase , safety_checker=lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowerCAmelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(
prompt=lowerCAmelCase , image=lowerCAmelCase , mask_image=lowerCAmelCase , generator=lowerCAmelCase , output_type="""np""" , )
lowerCAmelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def __lowercase ( self : Dict ):
lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
lowerCAmelCase = """stabilityai/stable-diffusion-2-inpainting"""
lowerCAmelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase , torch_dtype=torch.floataa , safety_checker=lowerCAmelCase , )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowerCAmelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(
prompt=lowerCAmelCase , image=lowerCAmelCase , mask_image=lowerCAmelCase , generator=lowerCAmelCase , output_type="""np""" , )
lowerCAmelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def __lowercase ( self : Optional[int] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
lowerCAmelCase = """stabilityai/stable-diffusion-2-inpainting"""
lowerCAmelCase = PNDMScheduler.from_pretrained(lowerCAmelCase , subfolder="""scheduler""" )
lowerCAmelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase , safety_checker=lowerCAmelCase , scheduler=lowerCAmelCase , torch_dtype=torch.floataa , )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
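        # attention slicing plus sequential CPU offload minimize peak VRAM; the assert below checks the budget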
lowerCAmelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(
prompt=lowerCAmelCase , image=lowerCAmelCase , mask_image=lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=2 , output_type="""np""" , )
lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 155
|
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _a ):
_a = ['audio_values', 'audio_mask']
def __init__( self : Optional[int] , lowerCAmelCase : List[str]=2048 , lowerCAmelCase : List[Any]=1 , lowerCAmelCase : Optional[Any]=[16, 16] , lowerCAmelCase : Optional[Any]=128 , lowerCAmelCase : Union[str, Any]=4_4100 , lowerCAmelCase : Any=86 , lowerCAmelCase : List[Any]=2048 , lowerCAmelCase : List[str]=0.0 , **lowerCAmelCase : Any , ):
super().__init__(
feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , )
lowerCAmelCase = spectrogram_length
lowerCAmelCase = num_channels
lowerCAmelCase = patch_size
lowerCAmelCase = feature_size // self.patch_size[1]
lowerCAmelCase = n_fft
lowerCAmelCase = sampling_rate // hop_length_to_sampling_rate
lowerCAmelCase = sampling_rate
lowerCAmelCase = padding_value
lowerCAmelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=lowerCAmelCase , norm="""slaney""" , mel_scale="""slaney""" , ).T
def __lowercase ( self : int , lowerCAmelCase : np.array ):
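        # convert a mono waveform to a log-mel spectrogram in dB, rescaled into [-1, 1]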
lowerCAmelCase = spectrogram(
lowerCAmelCase , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
lowerCAmelCase = log_spec[:, :-1]
lowerCAmelCase = log_spec - 20.0
lowerCAmelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : Dict , lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCAmelCase : Optional[Union[str, TensorType]] = None , lowerCAmelCase : Optional[bool] = True , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , **lowerCAmelCase : Dict , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
f''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
lowerCAmelCase = isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase = is_batched_numpy or (
isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
lowerCAmelCase = np.asarray(lowerCAmelCase , dtype=np.floataa )
elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowerCAmelCase = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCAmelCase ):
lowerCAmelCase = [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowerCAmelCase = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowerCAmelCase = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowerCAmelCase = np.array(lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
lowerCAmelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowerCAmelCase = np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowerCAmelCase = padded_audio_features * self.padding_value
for i in range(len(lowerCAmelCase ) ):
lowerCAmelCase = audio_features[i]
lowerCAmelCase = feature
# return as BatchFeature
if return_attention_mask:
lowerCAmelCase = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
lowerCAmelCase = {"""audio_values""": padded_audio_features}
lowerCAmelCase = BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
return encoded_inputs
| 155
| 1
|
'''simple docstring'''
import math
import sys
def read_file_binary( lowerCamelCase_ : str ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = ''
try:
with open(lowerCamelCase_ , 'rb' ) as binary_file:
UpperCAmelCase_ : str = binary_file.read()
for dat in data:
UpperCAmelCase_ : List[Any] = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def decompress_data( lowerCamelCase_ : str ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = {'0': '0', '1': '1'}
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = '', ''
UpperCAmelCase_ : List[Any] = len(lowerCamelCase_ )
for i in range(len(lowerCamelCase_ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCAmelCase_ : Optional[int] = lexicon[curr_string]
result += last_match_id
UpperCAmelCase_ : Optional[int] = last_match_id + '0'
if math.loga(lowerCamelCase_ ).is_integer():
UpperCAmelCase_ : List[Any] = {}
for curr_key in list(lowerCamelCase_ ):
UpperCAmelCase_ : str = lexicon.pop(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = new_lex
UpperCAmelCase_ : int = last_match_id + '1'
index += 1
UpperCAmelCase_ : str = ''
return result
def write_file_binary( file_path : str , to_write : str ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = 8
try:
        with open(file_path , 'wb' ) as opened_file:
UpperCAmelCase_ : Optional[int] = [
to_write[i : i + byte_length]
for i in range(0 , len(lowerCamelCase_ ) , lowerCamelCase_ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(lowerCamelCase_ , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def remove_prefix( lowerCamelCase_ : str ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
UpperCAmelCase_ : List[Any] = data_bits[counter:]
UpperCAmelCase_ : Optional[int] = data_bits[counter + 1 :]
return data_bits
def compress( source_path : str , destination_path : str ):
    """simple docstring"""
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 274
|
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
snake_case__ : Optional[Any] = logging.get_logger(__name__)
snake_case__ : List[Any] = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ):
'''simple docstring'''
    def __init__( self , config=None , data_args=None , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F''' {self.model.__class__}'''
)
            self.config = self.model.config
else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
                ' padding.' )
if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
    def _UpperCamelCase ( self , num_training_steps ):
'''simple docstring'''
        if self.optimizer is None:
            no_decay = ['bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {
                    'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    'weight_decay': self.args.weight_decay,
                },
                {
                    'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    'weight_decay': 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {'scale_parameter': False, 'relative_step': False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    'betas': (self.args.adam_beta1, self.args.adam_beta2),
                    'eps': self.args.adam_epsilon,
                }
            optimizer_kwargs['lr'] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
    def _get_lr_scheduler( self , num_training_steps ):
'''simple docstring'''
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
return scheduler
def _UpperCamelCase ( self ):
'''simple docstring'''
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss( self , model , inputs , labels ):
'''simple docstring'''
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss , logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss , _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
    def _UpperCamelCase ( self , model , inputs ):
        '''simple docstring'''
        labels = inputs.pop('labels' )
        loss , _ = self._compute_loss(model , inputs , labels )
return loss
    def _UpperCamelCase ( self , model , inputs , prediction_loss_only , ignore_keys = None , ):
'''simple docstring'''
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs['input_ids'] , attention_mask=inputs['attention_mask'] , **gen_kwargs , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs['max_length'] )
        labels = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
            loss , logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs['max_length'] )
return (loss, logits, labels)
    def _pad_tensors_to_max_len( self , tensor , max_length ):
'''simple docstring'''
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F''' padded to `max_length`={max_length}''' )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
return padded_tensor
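# A hedged sketch of the dynamically imported `label_smoothed_nll_loss` used
# in `_compute_loss` above (the signature is inferred from the call site:
# lprobs, target, epsilon, ignore_index); it mixes the usual NLL term with a
# uniform smoothing term over the vocabulary.
def _label_smoothed_nll_loss_sketch(lprobs , target , epsilon , ignore_index=-100 ):
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1 )
    nll_loss = -lprobs.gather(dim=-1 , index=target )
    smooth_loss = -lprobs.sum(dim=-1 , keepdim=True )
    pad_mask = target.eq(ignore_index )
    nll_loss.masked_fill_(pad_mask , 0.0 )
    smooth_loss.masked_fill_(pad_mask , 0.0 )
    nll_loss , smooth_loss = nll_loss.sum() , smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1 )
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss , nll_loss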
| 274
| 1
|
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'''169M''': 12,
'''430M''': 24,
'''1B5''': 24,
'''3B''': 32,
'''7B''': 32,
'''14B''': 40,
}
HIDDEN_SIZE_MAPPING = {
'''169M''': 7_68,
'''430M''': 10_24,
'''1B5''': 20_48,
'''3B''': 25_60,
'''7B''': 40_96,
'''14B''': 51_20,
}
def convert_state_dict (state_dict ):
    state_dict_keys = list(state_dict.keys() )
    for name in state_dict_keys:
        weight = state_dict.pop(name )
        # emb -> embedding
        if name.startswith('''emb.''' ):
            name = name.replace('''emb.''' , '''embeddings.''' )
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('''blocks.0.ln0''' ):
            name = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
        # att -> attention
        name = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , name )
        # ffn -> feed_forward
        name = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , name )
        # time_mix_k -> time_mix_key
        if name.endswith('''.time_mix_k''' ):
            name = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
        # time_mix_v -> time_mix_value
        if name.endswith('''.time_mix_v''' ):
            name = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
        # time_mix_r -> time_mix_receptance
        if name.endswith('''.time_mix_r''' ):
            name = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
        if name != "head.weight":
            name = '''rwkv.''' + name
        state_dict[name] = weight
    return state_dict
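# A hedged smoke test of the renaming scheme above (toy zero tensors; the
# keys are illustrative, not taken from a real checkpoint):
def _check_convert_state_dict_sketch():
    sd = {'''emb.weight''': torch.zeros(1 ), '''blocks.0.att.time_mix_k''': torch.zeros(1 )}
    converted = convert_state_dict(sd )
    assert '''rwkv.embeddings.weight''' in converted
    assert '''rwkv.blocks.0.attention.time_mix_key''' in converted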
def convert_rwkv_checkpoint_to_hf_format (repo_id , checkpoint_file , output_dir , size=None , tokenizer_file=None , push_to_hub=False , model_name=None ):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' )
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file )
        vocab_size = len(tokenizer )
    tokenizer.save_pretrained(output_dir )
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' )
    if size not in possible_sizes:
        raise ValueError(F'''`size` should be one of {possible_sizes}, got {size}.''' )
    config = RwkvConfig(
        vocab_size=vocab_size , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(output_dir )
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id , checkpoint_file )
    state_dict = torch.load(model_file , map_location='''cpu''' )
    state_dict = convert_state_dict(state_dict )
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict )
    for shard_file, shard in shards.items():
        torch.save(shard , os.path.join(output_dir , shard_file ) )
    if index is not None:
        index_file = os.path.join(output_dir , WEIGHTS_INDEX_NAME )
        # Save the index as well
        with open(index_file , '''w''' , encoding='''utf-8''' ) as f:
            content = json.dumps(index , indent=2 , sort_keys=True ) + '''\n'''
            f.write(content )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        '''Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry, you still have converted the model.''' )
    shard_files = list(shards.keys() )
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir , shard_file ) )
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(output_dir , shard_file ) )
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' )
        model = AutoModelForCausalLM.from_pretrained(output_dir )
        model.push_to_hub(model_name , max_shard_size='''2GB''' )
        tokenizer.push_to_hub(model_name )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
lowerCAmelCase_ = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 8
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(R"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
'''simple docstring'''
    negative_img = cn.convert_to_negative(img )
# assert negative_img array for at least one True
assert negative_img.any()
def _lowerCamelCase ( ):
'''simple docstring'''
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_UpperCamelCase , 110 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def _lowerCamelCase ( ):
'''simple docstring'''
    resp = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
    canny_array = canny.canny(canny_img )
# assert canny array for at least one True
assert canny_array.any()
def _lowerCamelCase ( ):
'''simple docstring'''
    assert gg.gaussian_filter(gray , 5 , sigma=0.9 ).all()
def _lowerCamelCase ( ):
'''simple docstring'''
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    res = conv.img_convolve(gray , laplace ).astype(uint8 )
assert res.any()
def _lowerCamelCase ( ):
'''simple docstring'''
    assert med.median_filter(gray , 3 ).any()
def _lowerCamelCase ( ):
'''simple docstring'''
    grad , theta = sob.sobel_filter(gray )
assert grad.any() and theta.any()
def _lowerCamelCase ( ):
'''simple docstring'''
    sepia = sp.make_sepia(img , 20 )
assert sepia.all()
def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
    burkes = bs.Burkes(imread(_UpperCamelCase , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
    nn = rs.NearestNeighbour(imread(_UpperCamelCase , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = "digital_image_processing/image_data/lena.jpg"
# Reading the image and converting it to grayscale.
__lowerCAmelCase = imread(_UpperCamelCase , 0 )
# Test for get_neighbors_pixel function() return not None
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = image[x_coordinate][y_coordinate]
__lowerCAmelCase = lbp.get_neighbors_pixel(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
__lowerCAmelCase = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
__lowerCAmelCase = lbp.local_binary_value(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
assert lbp_image.any()
| 57
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/rembert''': 256,
}
class UpperCamelCase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ):
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
@property
def a_ ( self) -> List[Any]:
return len(self.sp_model)
def a_ ( self) -> int:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Union[str, Any]:
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self, lowerCAmelCase__) -> Dict:
snake_case_ = d
snake_case_ = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=False) -> Any:
snake_case_ = self.sp_model.EncodeAsPieces(UpperCAmelCase__)
return pieces
def a_ ( self, lowerCAmelCase__) -> Any:
return self.sp_model.PieceToId(UpperCAmelCase__)
def a_ ( self, lowerCAmelCase__) -> Optional[int]:
return self.sp_model.IdToPiece(UpperCAmelCase__)
def a_ ( self, lowerCAmelCase__) -> int:
snake_case_ = self.sp_model.decode_pieces(UpperCAmelCase__)
return out_string
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = False) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.')
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(UpperCAmelCase__)) + [1] + ([0] * len(UpperCAmelCase__)) + [1]
return [1] + ([0] * len(UpperCAmelCase__)) + [1]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase__):
logger.error('Vocabulary path ({}) should be a directory'.format(UpperCAmelCase__))
return
snake_case_ = os.path.join(
UpperCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase__):
copyfile(self.vocab_file, UpperCAmelCase__)
return (out_vocab_file,)
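# A hedged illustration of the special-token layouts built above (letters
# stand for ordinary token ids):
#   single sequence:  [CLS] A A A [SEP]          -> token_type_ids 0 0 0 0 0
#   sequence pair:    [CLS] A A [SEP] B B [SEP]  -> token_type_ids 0 0 0 0 1 1 1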
| 361
|
"""simple docstring"""
from __future__ import annotations
import math
def minimax ( depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if len(UpperCAmelCase ) == 0:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
if is_max:
return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
def UpperCAmelCase ( ) -> None:
snake_case_ = [90, 23, 6, 33, 21, 65, 123, 34423]
snake_case_ = math.log(len(UpperCAmelCase ) , 2 )
print('Optimal value : ' , end='' )
print(minimax(0 , 0 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
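    # Worked check of the example above: the depth-2 maxima are (90, 33, 65,
    # 34423), the depth-1 minima are (33, 65), so the root maximizer yields
    # max(min(90, 33), min(65, 34423)) = 65 and "Optimal value : 65" is printed.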
| 312
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , do_convert_rgb=True , ):
        """simple docstring"""
        size = size if size is not None else {"""height""": 224, """width""": 224}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs ( self , equal_resolution=False , numpify=False , torchify=False ):
"""simple docstring"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
UpperCamelCase = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
                        255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uint8 ) )
else:
UpperCamelCase = []
for i in range(self.batch_size ):
                width , height = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
                image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uint8 ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
if torchify:
            image_inputs = [torch.from_numpy(x ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp ( self ):
"""simple docstring"""
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , do_center_crop=True )
@property
    def image_processor_dict ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A__ , """do_resize""" ) )
self.assertTrue(hasattr(A__ , """size""" ) )
self.assertTrue(hasattr(A__ , """do_center_crop""" ) )
self.assertTrue(hasattr(A__ , """center_crop""" ) )
self.assertTrue(hasattr(A__ , """do_normalize""" ) )
self.assertTrue(hasattr(A__ , """image_mean""" ) )
self.assertTrue(hasattr(A__ , """image_std""" ) )
self.assertTrue(hasattr(A__ , """do_convert_rgb""" ) )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 224, """width""": 224} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCamelCase = image_processing(A__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=A__ , numpify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCamelCase = image_processing(A__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=A__ , torchify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCamelCase = image_processing(A__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp ( self ):
"""simple docstring"""
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=True )
        self.expected_encoded_image_num_channels = 3
@property
    def image_processor_dict ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A__ , """do_resize""" ) )
self.assertTrue(hasattr(A__ , """size""" ) )
self.assertTrue(hasattr(A__ , """do_center_crop""" ) )
self.assertTrue(hasattr(A__ , """center_crop""" ) )
self.assertTrue(hasattr(A__ , """do_normalize""" ) )
self.assertTrue(hasattr(A__ , """image_mean""" ) )
self.assertTrue(hasattr(A__ , """image_std""" ) )
self.assertTrue(hasattr(A__ , """do_convert_rgb""" ) )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCamelCase = image_processing(A__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 343
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ibert'] = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
A_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 192
| 0
|
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process ( position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ):
"""simple docstring"""
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
def odd_even_transposition ( arr ):
"""simple docstring"""
    process_array_ = []
    result_pipe = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
process_array_.append(
Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    temp_lr = temp_rs
    temp_ls = temp_rr
for i in range(1 , len(lowerCAmelCase__ ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
process_array_.append(
Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        temp_lr = temp_rs
        temp_ls = temp_rr
process_array_.append(
Process(
            target=oe_process , args=(
len(lowerCAmelCase__ ) - 1,
arr[len(lowerCAmelCase__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(lowerCAmelCase__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(lowerCAmelCase__ ) ):
        arr[p] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
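# For reference, a hedged single-process sketch of the same odd-even
# transposition pattern (not part of the original module): even phases
# compare pairs starting at index 0, odd phases at index 1, and n phases
# suffice to sort n elements.
def _odd_even_sort_sketch(arr: list) -> list:
    arr = list(arr )
    for phase in range(len(arr ) ):
        for i in range(phase % 2 , len(arr ) - 1 , 2 ):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr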
def main ( ):
"""simple docstring"""
    arr = list(range(10 , 0 , -1 ) )
    print("""Initial List""" )
    print(*arr )
    arr = odd_even_transposition(arr )
    print("""Sorted List\n""" )
    print(*arr )
if __name__ == "__main__":
main()
| 352
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class _A :
    def __init__( self , a=2 , b=3 , length=64 , seed=None ):
        '''simple docstring'''
        rng = np.random.default_rng(seed )
        self.length = length
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )
def __len__( self ) -> Dict:
'''simple docstring'''
return self.length
def __getitem__( self , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class _A ( torch.nn.Module ):
    def __init__( self , a=0 , b=0 , double_output=False ):
        '''simple docstring'''
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.b = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.first_batch = True
def __A ( self , __UpperCAmelCase=None ) -> str:
'''simple docstring'''
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
            self.first_batch = False
return x * self.a[0] + self.b[0]
class _A ( torch.nn.Module ):
    def __init__( self , a=0 , b=0 , double_output=False ):
        '''simple docstring'''
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a ).float() )
        self.b = torch.nn.Parameter(torch.tensor(b ).float() )
        self.first_batch = True
def __A ( self , __UpperCAmelCase=None ) -> Tuple:
'''simple docstring'''
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
            self.first_batch = False
return x * self.a + self.b
def get_dataloaders ( accelerator , batch_size : int = 16 ):
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    data_files = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
    datasets = load_dataset("""csv""" , data_files=data_files )
    label_list = datasets["""train"""].unique("""label""" )
    label_to_id = {v: i for i, v in enumerate(label_list )}
def tokenize_function(lowerCAmelCase__ : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None , padding="""max_length""" )
        if "label" in examples:
            outputs["""labels"""] = [label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(lowerCAmelCase__ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCAmelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowerCAmelCase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
return train_dataloader, eval_dataloader
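# A hedged usage sketch for the helper above (the Accelerator instance is an
# assumption; the CSV paths come from the defaults in this file):
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   train_dl, eval_dl = get_dataloaders(accelerator, batch_size=16)
#
# Note that collate_fn closes over `accelerator` to pick fixed-length padding
# on TPU and dynamic ("longest") padding elsewhere.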
| 16
| 0
|
def ugly_numbers ( n : int ) -> int:
    '''simple docstring'''
    ugly_nums = [1]
    i2 , i3 , i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1 , n ):
        next_num = min(next_2 , next_3 , next_5 )
        ugly_nums.append(next_num )
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(2_00) = }""")
| 252
|
import re
import string
import numpy as np
import datasets
UpperCAmelCase : List[str] = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
UpperCAmelCase : str = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
UpperCAmelCase : Dict = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
def __A ( self , A , A , A=None , A=False , A=False , A=False , ) -> List[str]:
'''simple docstring'''
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
lowerCamelCase = np.array([re.sub(A , """""" , A ) for x in predictions] )
lowerCamelCase = np.array([re.sub(A , """""" , A ) for x in references] )
else:
lowerCamelCase = np.asarray(A )
lowerCamelCase = np.asarray(A )
if ignore_case:
lowerCamelCase = np.char.lower(A )
lowerCamelCase = np.char.lower(A )
if ignore_punctuation:
lowerCamelCase = string.punctuation.maketrans("""""" , """""" , string.punctuation )
lowerCamelCase = np.char.translate(A , table=A )
lowerCamelCase = np.char.translate(A , table=A )
if ignore_numbers:
lowerCamelCase = string.digits.maketrans("""""" , """""" , string.digits )
lowerCamelCase = np.char.translate(A , table=A )
lowerCamelCase = np.char.translate(A , table=A )
lowerCamelCase = predictions == references
return {"exact_match": np.mean(A ) * 1_00}
| 252
| 1
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ):
"""simple docstring"""
set_seed(3 )
# generate train_data and objective_set
    train_data , objective_set = generate_datasets(
        context_len , data_file , number=size_objective_set , min_len=1026 , trim=trim )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
# load pretrained model
    model = load_gpta('gpt2' ).to(device )
print('computing perplexity on objective set' )
    orig_perp = compute_perplexity(model , objective_set , context_len ).item()
    print('perplexity on objective set:' , orig_perp )
# collect igf pairs and save to file demo.jbl
collect_objective_set(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def training_secondary_learner( secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , ):
"""simple docstring"""
set_seed(42 )
# Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained('gpt2' )
# Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model )
# Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner , secondary_learner_train_data , max_epochs=secondary_learner_max_epochs , batch_size=secondary_learner_batch_size , eval_freq=100 , igf_model_path=igf_model_path , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def finetune( model , train_dataset , test_dataset , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=None , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ):
"""simple docstring"""
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
    train_sampler = RandomSampler(train_dataset )
    train_dataloader = DataLoader(train_dataset , sampler=train_sampler )
    num_train_epochs = max_steps // (len(train_dataloader )) + 1
    global_step = 0
    context = torch.zeros((1, context_len) , dtype=torch.long , device=device )
    model , lm_optimizer , lm_scheduler = recopy_model(model , device , max_steps )
model.train()
if secondary_learner is not None:
secondary_learner.to(_snake_case )
secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model , test_dataset , context_len )
    test_perps.append(real_perp )
    print('Test perplexity, step' , global_step , ':' , real_perp )
    for epoch in range(int(num_train_epochs ) ):
        for step, example in enumerate(train_dataloader ):
            torch.cuda.empty_cache()
            start = random.randint(0 , example.size(2 ) - context_len - 1 )
            context[0, :] = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context , labels=context )
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context , dtype=torch.long , device=device ).unsqueeze(0 ) )[0].item()
                observed_qs.append(float(predicted_q ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
                lm_loss = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
                examples = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model , test_dataset , context_len )
                    test_perps.append(real_perp )
                    print('Test perplexity, step' , global_step , ':' , real_perp )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
    torch.save(model.state_dict() , finetuned_model_name )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def main( ):
"""simple docstring"""
    parser = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task' )
# Required parameters
parser.add_argument(
'--data_dir' , default=_snake_case , type=_snake_case , required=_snake_case , help='The input data dir. Should contain data files for WikiText.' , )
parser.add_argument(
'--model_name_or_path' , default=_snake_case , type=_snake_case , required=_snake_case , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--data_file' , type=_snake_case , default=_snake_case , help=(
'A jbl file containing tokenized data which can be split as objective dataset, '
'train_dataset and test_dataset.'
) , )
parser.add_argument(
'--igf_data_file' , type=_snake_case , default=_snake_case , help='A jbl file containing the context and information gain pairs to train secondary learner.' , )
parser.add_argument(
'--output_dir' , default=_snake_case , type=_snake_case , required=_snake_case , help='The output directory where the final fine-tuned model is stored.' , )
parser.add_argument(
'--tokenizer_name' , default=_snake_case , type=_snake_case , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument('--seed' , type=_snake_case , default=_snake_case , help='A seed for reproducible training.' )
parser.add_argument(
'--context_len' , default=32 , type=_snake_case , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--size_objective_set' , default=100 , type=_snake_case , help='number of articles that are long enough to be used as our objective set' , )
parser.add_argument(
'--eval_freq' , default=100 , type=_snake_case , help='secondary model evaluation is triggered at eval_freq' )
parser.add_argument('--max_steps' , default=1000 , type=_snake_case , help='To calculate training epochs' )
parser.add_argument(
'--secondary_learner_batch_size' , default=128 , type=_snake_case , help='batch size of training data for secondary learner' , )
parser.add_argument(
'--batch_size' , default=16 , type=_snake_case , help='batch size of training data of language model(gpt2) ' )
parser.add_argument(
'--eval_interval' , default=10 , type=_snake_case , help=(
'decay the selectivity of our secondary learner filter from'
'1 standard deviation above average to 1 below average after 10 batches'
) , )
parser.add_argument(
'--number' , default=100 , type=_snake_case , help='The number of examples split to be used as objective_set/test_data' )
parser.add_argument(
'--min_len' , default=1026 , type=_snake_case , help='The minimum length of the article to be used as objective set' )
parser.add_argument(
'--secondary_learner_max_epochs' , default=15 , type=_snake_case , help='number of epochs to train secondary learner' )
parser.add_argument('--trim' , default=_snake_case , type=_snake_case , help='truncate the example if it exceeds context length' )
parser.add_argument(
'--threshold' , default=1.0 , type=_snake_case , help=(
'The threshold value used by secondary learner to filter the train_data and allow only'
' informative data as input to the model'
) , )
parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=_snake_case , help='finetuned_model_name' )
parser.add_argument(
'--recopy_model' , default=_snake_case , type=_snake_case , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=_snake_case , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , )
# Load train data for secondary learner
    secondary_learner_train_data = joblib.load('data/IGF_values.jbl' )
# Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='igf_model.pt' , )
# load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained('gpt2' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
    train_dataset , test_dataset = generate_datasets(
        context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=100 , min_len=1026 , trim=True )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model , train_dataset , test_dataset , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=secondary_learner , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , )
if __name__ == "__main__":
main()
| 366
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __magic_name__ ( unittest.TestCase , ToolTesterMixin ):
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
        self.tool = load_tool('text-to-speech' )
self.tool.setup()
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
        result = self.tool('hey' )
        resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
__a =self.tool('hey' )
__a =result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
| 308
| 0
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=3, every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True,
    )
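# A minimal wiring sketch (not part of the original module): how these callbacks might be
# handed to a pytorch_lightning Trainer. The output directory and metric name are illustrative.
#
#     callbacks = [
#         get_checkpoint_callback("output/", "rouge2"),
#         get_early_stopping_callback("rouge2", patience=3),
#         Seq2SeqLoggingCallback(),
#     ]
#     trainer = pl.Trainer(max_epochs=3, callbacks=callbacks)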
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")
    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 92
|
"""simple docstring"""
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the unique paths from the top-left corner of ``grid`` to the bottom-right
    corner, moving in the four cardinal directions without revisiting a cell;
    cells equal to 1 are blocked."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
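# A worked example (illustrative, not part of the original module): on a fully open 2x2 grid
# there are exactly two paths from the top-left to the bottom-right corner.
#
#     grid = [[0, 0], [0, 0]]
#     print(depth_first_search(grid, 0, 0, set()))  # -> 2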
| 335
| 0
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)"""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files"""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
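    # e.g. list_str("DeprecationWarning,UserWarning") -> ["DeprecationWarning", "UserWarning"]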
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
            print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 366
|
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
END_COMMON = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
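# Illustrative rename (the TF key below is hypothetical but follows the checkpoint layout the
# patterns target): applying DECODER_PATTERNS to
#   "pegasus/decoder/layer_0/attention/self/query/kernel"
# rewrites it step by step ('/'->'.' , 'layer_'->'layers.', 'kernel'->'weight',
# 'pegasus'->'model', 'attention.self'->'self_attn', 'query'->'q_proj') into
#   "model.decoder.layers.0.self_attn.q_proj.weight"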
def convert_bigbird_pegasus(tf_weights, config_update):
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path):
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path, save_dir, config_update):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
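# Illustrative invocation (the script filename is an assumption; the flags come from the
# argparse definitions above):
#   python convert_bigbird_pegasus_tf_to_pytorch.py --tf_ckpt_path /path/to/tf_ckpt --save_dir ./bigbird_pegasus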
| 331
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a single PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
| 219
|
import socket
def main() -> None:
    """Receive a file from a server over a plain TCP socket."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
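# A minimal counterpart server (a sketch for local testing, not part of the original file):
# it listens on the same port and streams a file back to the first client that connects.
#
#     import socket
#
#     def serve(filename: str = "file_to_send.txt") -> None:
#         srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#         srv.bind((socket.gethostname(), 12312))
#         srv.listen(1)
#         conn, _ = srv.accept()
#         conn.recv(1024)  # consume the client's greeting
#         with open(filename, "rb") as f:
#             while chunk := f.read(1024):
#                 conn.send(chunk)
#         conn.close()
#         srv.close()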
| 219
| 1
|
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))
if __name__ == "__main__":
    phone = "0094702343221"
print(is_sri_lankan_phone_number(phone))
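# Hand-checked examples against the pattern above (illustrative, not exhaustive):
#   is_sri_lankan_phone_number("0771234567")   -> True   (prefix 0, then "7", then 7 from the allowed set)
#   is_sri_lankan_phone_number("+94712345678") -> True
#   is_sri_lankan_phone_number("0731234567")   -> False  (3 is not in the allowed second-digit set)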
| 131
|
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-indexed position of the highest set bit of ``number``
    (equivalently, ``number.bit_length()``)."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
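# Quick sanity checks (illustrative): 25 == 0b11001, so its highest set bit sits at position 5,
# while 0 has no set bits at all.
#   get_highest_set_bit_position(25) -> 5
#   get_highest_set_bit_position(1)  -> 1
#   get_highest_set_bit_position(0)  -> 0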
| 131
| 1
|
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Check that no already placed queen attacks square (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, backtracking on conflicts; prints each full solution."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False
def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
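# For n = 8 this backtracking search prints all 92 solutions of the classic eight-queens puzzle.
# Note that `solution` collects references to the same mutable board (which is cleared again on
# backtracking), so only the printed output and len(solution) are meaningful here.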
| 32
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
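# With this pattern, `from transformers.models.encoder_decoder import EncoderDecoderModel`
# does not import the heavy framework code at package-import time; the _LazyModule resolves
# the attribute (and only then imports the corresponding submodule) on first access.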
| 127
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
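# A small sanity check (illustrative): with the defaults above, the channel dimension after
# the last stage is hidden_size = int(64 * 2 ** (4 - 1)) = 512.
#
#     config = DinatConfig()
#     assert config.hidden_size == 512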
| 355
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
),
"""squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""squeezebert/squeezebert-uncased""": 512,
"""squeezebert/squeezebert-mnli""": 512,
"""squeezebert/squeezebert-mnli-headless""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
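# Worked example (with illustrative token ids): for a sequence pair A = [5, 6] and B = [7],
# build_inputs_with_special_tokens yields [CLS] A [SEP] B [SEP], and
# create_token_type_ids_from_sequences marks the first segment with 0s and the second with 1s:
#   ids:      [cls, 5, 6, sep, 7, sep]
#   type ids: [0,   0, 0, 0,   1, 1 ]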
| 192
| 0
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )
    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 123
|
def dodecahedron_surface_area(edge: int) -> float:
    """Surface area of a regular dodecahedron: 3 * sqrt(25 + 10 * sqrt(5)) * edge**2."""
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: int) -> float:
    """Volume of a regular dodecahedron: (15 + 7 * sqrt(5)) / 4 * edge**3."""
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
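# Sanity check against the closed forms (illustrative): for edge = 1,
#   dodecahedron_surface_area(1) ≈ 3 * sqrt(25 + 10 * sqrt(5)) ≈ 20.6458
#   dodecahedron_volume(1)       ≈ (15 + 7 * sqrt(5)) / 4      ≈ 7.6631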
| 123
| 1
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels
    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    # We need to override this test because Beit's forward signature is different than text models.
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
| 346
|
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
lowerCAmelCase_ : List[Any] = logging.getLogger(__name__)
lowerCAmelCase_ : List[Any] = {'facebook/bart-base': BartForConditionalGeneration}
lowerCAmelCase_ : int = {'facebook/bart-base': BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams", type=int, default=None, help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device", type=str, default="cpu", help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], num_beams=num_beams, max_length=max_length, early_stopping=True, decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model, (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ), onnx_file_path, opset_version=14, input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"], output_names=["output_ids"], dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            }, example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None, {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
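# Illustrative invocation (the script filename is an assumption; the flags come from the
# argparse definitions above):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base --output_file_path BART.onnx --device cpu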
| 346
| 1
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels

        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels

        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # the mixin names and the four boolean flags below are reconstructed from the common test layout
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskaformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)
    # the test method names below are reconstructed so the duplicated placeholder names no longer shadow one another
    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
def heaps(arr: list) -> list:
    """Return all permutations of `arr`, generated with Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap the i-th and last elements
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd: swap the first and last elements
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
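A quick sanity check of the function above (an illustrative sketch, not part of the original script): Heap's algorithm yields each ordering exactly once, so a 3-element input produces 3! = 6 tuples.

# Illustrative usage of heaps() defined above
assert sorted(heaps([1, 2, 3])) == [
    (1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)
]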
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Invert the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is set."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
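A worked example of the helpers above on the 4-bit value 0b1010 (illustrative only; the names follow the renamed functions):

n = 0b1010  # decimal 10
assert set_bit(n, 0) == 0b1011    # turn bit 0 on  -> 11
assert clear_bit(n, 1) == 0b1000  # turn bit 1 off -> 8
assert flip_bit(n, 3) == 0b0010   # invert bit 3   -> 2
assert is_bit_set(n, 1) is True   # bit 1 of 1010 is set
assert get_bit(n, 2) == 0         # bit 2 of 1010 is clear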
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowercase__ : List[Any] = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
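The three branches above normalize every accepted input shape to a batch of videos; a small sketch of the contract (illustrative only, with a PIL image standing in for any valid image type):

# img = PIL.Image.new("RGB", (4, 4))            # a single frame
# make_batched(img)            == [[img]]       # one video with one frame
# make_batched([img, img])     == [[img, img]]  # one video with two frames
# make_batched([[img], [img]]) is returned as-is  # already a batch of videos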
class VideoImageProcessor(BaseImageProcessor):
    # the class name here is a stand-in; the original name was stripped from this excerpt
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        offset=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, offset=True, data_format=None, **kwargs):
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        offset=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        data_format=ChannelDimension.FIRST,
    ):
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        offset=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format)
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
    'configuration_speecht5': [
        'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
        'SpeechT5Config',
        'SpeechT5HifiGanConfig',
    ],
    'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
    'processing_speecht5': ['SpeechT5Processor'],
}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_speecht5'] = ['SpeechT5Tokenizer']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_speecht5'] = [
        'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SpeechT5ForSpeechToText',
        'SpeechT5ForSpeechToSpeech',
        'SpeechT5ForTextToSpeech',
        'SpeechT5Model',
        'SpeechT5PreTrainedModel',
        'SpeechT5HifiGan',
    ]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
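A brief note on the pattern above: at runtime the package module is replaced by a `_LazyModule`, so the heavy submodules only load when first touched. An illustrative (hypothetical) interaction:

# from transformers.models.speecht5 import SpeechT5Config   # first access triggers the configuration import
# from transformers.models.speecht5 import SpeechT5Model    # first access triggers the (torch-only) modeling import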
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Map every possible byte to a printable unicode character for reversible byte-level BPE."""
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
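A quick illustration of the two helpers (a sketch, not from the original file): bytes outside the printable ranges are shifted past 255, which is why a leading space becomes "Ġ" in RoBERTa-style vocabularies.

# bytes_to_unicode() is a 256-entry bijection; byte 32 (space) maps to chr(256 + 32) == "Ġ"
assert bytes_to_unicode()[32] == "Ġ"
# get_pairs works on tuples of symbols:
assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}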
class LongformerTokenizer(PreTrainedTokenizer):
    """Longformer tokenizer, a byte-level BPE tokenizer derived from the GPT-2/RoBERTa one."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__(self , __lowercase , __lowercase , __lowercase="replace" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase=False , **__lowercase , ):
__lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else bos_token
__lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else eos_token
__lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else sep_token
__lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else cls_token
__lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else unk_token
__lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
super().__init__(
errors=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , add_prefix_space=__lowercase , **__lowercase , )
with open(__lowercase , encoding='''utf-8''' ) as vocab_handle:
__lowerCAmelCase = json.load(__lowercase )
__lowerCAmelCase = {v: k for k, v in self.encoder.items()}
__lowerCAmelCase = errors # how to handle errors in decoding
__lowerCAmelCase = bytes_to_unicode()
__lowerCAmelCase = {v: k for k, v in self.byte_encoder.items()}
with open(__lowercase , encoding='''utf-8''' ) as merges_handle:
__lowerCAmelCase = merges_handle.read().split('''\n''' )[1:-1]
__lowerCAmelCase = [tuple(merge.split() ) for merge in bpe_merges]
__lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
__lowerCAmelCase = {}
__lowerCAmelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowerCAmelCase = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']
        )

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images built from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending `sorted_collection`; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant; the caller passes the initial bounds 0 and len(collection) - 1."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    # `collection` is bound unconditionally so the demo below always runs
    # (in the mangled original it was only defined inside the debug branch)
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    debug = 0
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
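A worked probe trace for the demo above (illustrative, values checked by hand): searching 67 in [10, 30, 40, 45, 50, 66, 77, 93].

# probe 1: point = 0 + (67 - 10) * (7 - 0) // (93 - 10) = 399 // 83 = 4 -> 50 < 67, so left = 5
# probe 2: point = 5 + (67 - 66) * (7 - 5) // (93 - 66) = 5 + 2 // 27 = 5 -> 66 < 67, so left = 6
# probe 3: point = 6 + (67 - 77) * (7 - 6) // (93 - 77) = 6 + (-10 // 16) = 5 < left, window shrinks to [5, 6]
# the window then collapses onto index 6 (value 77) with no match, so the demo prints "Not found"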
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3_000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
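

# A minimal sketch (not part of the original test file) of what these tests
# exercise: the feature extractor maps raw float audio to log-mel input features.
# The shape assumes the default Whisper configuration (80 mel bins, 3000 frames).
def _example_whisper_feature_extraction():
    extractor = WhisperFeatureExtractor()
    waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
    features = extractor(waveform, sampling_rate=16_000, return_tensors="np").input_features
    assert features.shape == (1, 80, 3_000)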
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """
    sample (`torch.FloatTensor` of shape `(batch_size x num_frames, num_channels, height, width)`):
        Hidden states conditioned on `encoder_hidden_states` input.
    """

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True, ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)
    def forward( self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True, ):
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block( hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
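

# A hedged usage sketch (not in the original file): feeding a video latent of
# `batch * frames` frames through the temporal transformer; all sizes here are
# illustrative assumptions, not defaults taken from this module.
def _example_temporal_transformer():
    model = TransformerTemporalModel(num_attention_heads=8, attention_head_dim=32, in_channels=32, norm_num_groups=8)
    hidden_states = torch.randn(2 * 4, 32, 8, 8)  # batch_size=2, num_frames=4
    out = model(hidden_states, num_frames=4).sample
    assert out.shape == (8, 32, 8, 8)  # temporal attention preserves the input shape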
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    """Convert a TensorFlow BigBird checkpoint into a PyTorch model and save it."""
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
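
# Example invocation (a sketch; the script file name and all paths below are
# placeholders, not values taken from this module):
#
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/bigbird/ckpt \
#       --big_bird_config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --is_trivia_qa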
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__( self, parent, batch_size=7, num_channels=3, num_frames=10, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], crop_size=None, ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
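

# A short sketch (not in the original tests) of the behaviour under test: the
# processor turns a list of frames into one (num_frames, channels, height, width)
# tensor per video, batched along a leading dimension. Sizes are illustrative.
def _example_vivit_preprocessing():
    image_processor = VivitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
    video = [Image.new("RGB", (30, 30)) for _ in range(10)]  # 10 blank frames
    pixel_values = image_processor(video, return_tensors="pt").pixel_values
    assert pixel_values.shape == (1, 10, 3, 18, 18)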
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__( self, transformer: Transformer2DModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers, id2label: Optional[Dict[int, str]] = None, ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)

        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )

        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__( self, class_labels: List[int], guidance_scale: float = 4.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True, ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size), generator=generator, device=self.device, dtype=self.transformer.dtype, )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
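

# A hedged usage sketch (not part of the original pipeline file); the checkpoint
# id "facebook/DiT-XL-2-256" and the label string are assumptions for illustration.
def _example_dit_pipeline():
    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")  # assumed checkpoint id
    class_ids = pipe.get_label_ids(["white shark"])
    output = pipe(class_labels=class_ids, num_inference_steps=25)
    output.images[0].save("dit_sample.png")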
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily make change for `value`, taking the largest denomination first
    (correct for canonical coin systems such as Indian currency)."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer
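

# A quick sketch of the greedy behaviour (not in the original module):
# 987 = 500 + 4 x 100 + 50 + 20 + 10 + 5 + 2 with the denominations below.
def _example_find_minimum_change():
    change = find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
    assert change == [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]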
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates train/eval `DataLoader`s for GLUE MRPC using the bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE, drop_last=(accelerator.mixed_precision == "fp8"), )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
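
# Typical invocations (a sketch; the script file name `nlp_example.py` is an
# assumption based on the accelerate examples layout, not taken from this file):
#
#   python nlp_example.py                               # single CPU or GPU
#   accelerate launch nlp_example.py                    # whatever `accelerate config` set up
#   accelerate launch --mixed_precision fp16 nlp_example.py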
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
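

# A minimal sketch (an assumption, not in the original file) of how a concrete
# test class is expected to plug into this mixin: it supplies
# `feature_extraction_class` and `feat_extract_dict`, and inherits the
# save/load round-trip tests above. `MyFeatureExtractor` is hypothetical.
#
#   class MyFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#       feature_extraction_class = MyFeatureExtractor
#
#       def setUp(self):
#           self.feat_extract_dict = {"feature_size": 1, "padding_value": 0.0}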
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__( self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs, ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs, )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory )
        return dataset
class JsonDatasetWriter:
    def __init__( self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs, ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs
    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
                    """ was passed. Please provide a local path instead.""" )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs )
        return written
    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write( self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs, ) -> int:
        """Writes the pyarrow table as JSON lines to a binary file handle.

        Caller is responsible for opening and closing the handle.
        """
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format", ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format", ):
                    written += file_obj.write(json_str)

        return written
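

# A hedged usage sketch (not part of the original module): round-tripping a small
# Dataset through JSON lines with the reader/writer above. In practice these
# classes are driven via `Dataset.to_json` / `Dataset.from_json`, so the direct
# calls and the "train" split argument below may need adjustment.
def _example_json_round_trip(tmp_path="data.jsonl"):
    dataset = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
    JsonDatasetWriter(dataset, tmp_path).write()
    reloaded = JsonDatasetReader(tmp_path, split="train").read()
    assert reloaded.column_names == dataset.column_names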
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__( self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f""" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}""" )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
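

# A hedged usage sketch (not in the original file); the checkpoint id is an
# assumption, and with apply_ocr=False the caller must supply words and boxes.
def _example_layoutlmv3_processing():
    from PIL import Image
    from transformers import LayoutLMv3ImageProcessor, LayoutLMv3TokenizerFast

    image_processor = LayoutLMv3ImageProcessor(apply_ocr=False)
    tokenizer = LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base")  # assumed checkpoint
    processor = LayoutLMv3Processor(image_processor=image_processor, tokenizer=tokenizer)

    image = Image.new("RGB", (224, 224))
    words = ["hello", "world"]
    boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]  # boxes normalized to a 0-1000 scale
    encoding = processor(image, words, boxes=boxes, return_tensors="pt")
    # encoding contains input_ids, bbox, attention_mask and pixel_values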